Example #1
def integrate_trap(fc,u,t1,t2,N,implementation=2):
    grid = np.linspace(t1,t2,N+1)
    dt = grid[1] - grid[0]
    fs = [fc(u,t) for t in grid] # f on grid
    if type(fs[0]) == casadi.SXMatrix:
        assert fs[0].numel() == 1
    hs = [fabs(f) for f in fs] # absolute values of f on grid
    F=0
    if implementation==1:
        # This formulation theoretically allows short-circuiting, branch prediction
        for i in xrange(len(fs)-1):
            fa=fs[i]
            fb=fs[i+1]
            ha=hs[i]
            hb=hs[i+1]
            samesign=casadi.sign(fa)==casadi.sign(fb)
            F=F+casadi.if_else(samesign,trapezoid_area(ha,hb,dt),
                               bowtie_area(ha,hb,dt))

    if implementation==2:
        # This formulation theoretically allows use of SIMD (vector) extensions
        for i in xrange(len(fs)-1):
            fa=fs[i]
            fb=fs[i+1]
            ha=hs[i]
            hb=hs[i+1]
            ha_plus_hb=ha+hb
            F=F+ha_plus_hb + (fa*fb-ha*hb)/ha_plus_hb

        F=F*dt/2
    if type(F) == casadi.SXMatrix:
        assert F.numel() == 1
    return F,grid
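For reference, a minimal NumPy-only sketch of the same trapezoid/bowtie idea, using plain floats instead of CasADi symbols; trapezoid_area and bowtie_area are written out here to mirror the helper names assumed above:

import numpy as np

def trapezoid_area(ha, hb, dt):
    # area under |f| when f keeps its sign: an ordinary trapezoid
    return dt * (ha + hb) / 2

def bowtie_area(ha, hb, dt):
    # area under |f| when f changes sign: two triangles meeting at the
    # zero crossing ("bowtie"), with heights ha, hb and total width dt
    return dt * (ha**2 + hb**2) / (2 * (ha + hb))

def integrate_abs_trap(f, t1, t2, N):
    grid = np.linspace(t1, t2, N + 1)
    dt = grid[1] - grid[0]
    fs = f(grid)
    hs = np.abs(fs)
    F = 0.0
    for fa, fb, ha, hb in zip(fs[:-1], fs[1:], hs[:-1], hs[1:]):
        if np.sign(fa) == np.sign(fb):
            F += trapezoid_area(ha, hb, dt)
        else:
            F += bowtie_area(ha, hb, dt)
    return F

# the integral of |sin(t)| over one period is exactly 4
print(integrate_abs_trap(np.sin, 0.0, 2 * np.pi, 200))  # ~4.0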
Example #2
def integrate_rect(fc,u,t1,t2,N):
    grid = np.linspace(t1,t2,N+1)
    dt = grid[1] - grid[0]
    fs = [fc(u,t+dt/2) for t in grid[:-1]] # f at the midpoints
    Fi = [fabs(f) for f in fs]
    F = dt*sum(Fi)
    assert F.numel() == 1
    return F,grid
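The midpoint version lends itself to a fully vectorised form; a NumPy-only sketch with illustrative arguments:

import numpy as np

def integrate_abs_rect(f, t1, t2, N):
    grid = np.linspace(t1, t2, N + 1)
    dt = grid[1] - grid[0]
    mid = grid[:-1] + dt / 2          # midpoint of each interval
    return dt * np.abs(f(mid)).sum()

print(integrate_abs_rect(np.sin, 0.0, 2 * np.pi, 200))  # ~4.0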
Example #3
    def getViolations(self,g,lbg,ubg,reportThreshold=0,reportEqViolations=False):
        """
        Tests if g >= ubg + reportThreshold
                 g <= lbg - reportThreshold
        Positive reportThreshold suppresses barely active bounds
        Negative reportThreshold reports not-quite-active bounds
        """
        violations = {}

        ubviols = g - ubg
        lbviols = lbg - g
        ubviolsIdx = np.where(C.logic_and(ubviols >= reportThreshold, ubg > lbg))[0]
        lbviolsIdx = np.where(C.logic_and(lbviols >= reportThreshold, ubg > lbg))[0]

        eqviolsIdx = []
        if reportEqViolations:
            eqviolsIdx = np.where(C.logic_and(C.fabs(ubviols) >= reportThreshold, ubg == lbg))[0]

        for k in ubviolsIdx:
            (name,time,idx) = self._tags[k]
            viol = ('ub',(time,idx),float(ubviols[k]))#,g[k],ubg[k])
            if name not in violations:
                violations[name] = [viol]
            else:
                violations[name].append(viol)
        for k in lbviolsIdx:
            (name,time,idx) = self._tags[k]
            viol = ('lb',(time,idx),float(lbviols[k]))#,g[k],lbg[k])
            if name not in violations:
                violations[name] = [viol]
            else:
                violations[name].append(viol)
        for k in eqviolsIdx:
            (name,time,idx) = self._tags[k]
            viol = ('eq',(time,idx),float(ubviols[k]))#,g[k],lbg[k])
            if name not in violations:
                violations[name] = [viol]
            else:
                violations[name].append(viol)
        return violations
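A compact NumPy-only illustration of the same bound tests on a toy constraint vector (the tag bookkeeping and C.logic_and are replaced by plain NumPy; all numbers are made up):

import numpy as np

g   = np.array([0.0, 1.2, -0.5, 2.0])
lbg = np.array([0.0, 0.0, -1.0, 0.0])
ubg = np.array([0.0, 1.0,  1.0, 1.5])
reportThreshold = 0.0

ubviols = g - ubg
lbviols = lbg - g
# only inequality rows (ubg > lbg) are checked against a single-sided bound
ub_idx = np.where((ubviols >= reportThreshold) & (ubg > lbg))[0]
lb_idx = np.where((lbviols >= reportThreshold) & (ubg > lbg))[0]
print(ub_idx, lb_idx)  # -> [1 3] []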
Example #4
    def set_data(self, data):
        """ Attach experimental measurement data.

        data : a pd.DataFrame object
            Data should have columns corresponding to the state labels in
            self.state_names, with an index corresponding to the measurement
            times.

        """

        # Should raise an error if no state name is present
        df = data.loc[:, self.state_names]

        # Rename columns with state indices
        df.columns = np.arange(self.NEQ)

        # Remove empty (nonmeasured) states
        self.data = df.loc[:, ~pd.isnull(df).all(0)]

        obj_list = []
        for ((ti, state), xi) in self.data.stack().iteritems():
            obj_list += [(self._get_interp(ti, [state]) - xi) / self.data[state].max()]

        obj_resid = cs.sum_square(cs.vertcat(obj_list))

        # ts = data.index

        # Define objective function
        # obj_resid = cs.sum_square(cs.vertcat(
        #     [(self._get_interp(ti, self._states_indicies) - xi)/ xs.max(0)
        #      for ti, xi in zip(ts, xs)]))

        alpha = cs.SX.sym("alpha")
        obj_lasso = alpha * cs.sumRows(cs.fabs(self._P))

        self._obj = obj_resid + obj_lasso

        # Create the solver object
        self._nlp = cs.SXFunction("nlp", cs.nlpIn(x=self._V, p=alpha), cs.nlpOut(f=self._obj, g=self._g))
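The objective assembled here is a sum-of-squares residual plus an L1 ("lasso") penalty on the parameter vector. A minimal CasADi sketch of that shape, written with the CasADi 3.x names (sumsqr and sum1 instead of sum_square and sumRows) and made-up dimensions:

import casadi as cs

p = cs.SX.sym("p", 3)        # parameters to be regularised (illustrative size)
resid = cs.SX.sym("r", 5)    # stand-in for the interpolation residuals
alpha = cs.SX.sym("alpha")   # regularisation weight

obj = cs.sumsqr(resid) + alpha * cs.sum1(cs.fabs(p))
print(obj)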
Example #5
def qp_solve_2(prob, p_init, x_init, y_init, step, lb, ub, N, x0, lb_init, ub_init):
    """
    QP solver for path-following algorithm
    inputs: prob - problem description
            p_init - initial parameter value
            x_init - initial primal variable
            y_init - initial dual variable
            step - step to be taken (in p)
            lb, ub - current variable bounds
            N - iteration number
            x0 - initial guess for the QP
            lb_init, ub_init - initial lower and upper bounds
    outputs: x - solution primal variable
            qp_val - objective function value
            qp_exit - return status of QP solver
                
    """
    #Importing problem to be solved

    neq = prob['neq']                           #Number of equality constraints
    niq = prob['niq']                         #Number of inequality constraints
    name = prob['name']                                        #Name of problem
    _, g, H, Lxp, cst, _, _, Jeq, dpe, _ = objective(x_init,y_init,p_init,N,params) #objective function

    #Setting up QP
    f = mtimes(Lxp,step) + g
    
    #Constraints
    ceq = cst
    Aeq = Jeq
    beq = mtimes(dpe,step) + ceq

    #Check Lagrange multipliers from bound constraints
    lamC = fabs(y_init['lam_x'])
    BAC = where(lamC >= 1e-3)[0] #setting limits to determine if constraint is active

    #Finding active constraints
    numBAC = len(BAC)
    for i in range(0,numBAC):
        #Placing strongly active constraint on boundary
        indB = BAC[i]
        #Keeping upper bound on boundary
        ub[indB] = 0
        lb[indB] = 0

    #Solving the QP
    """
        minimize:
        (1/2)*x'*H*x + f'*x
        subject to:
        Aeq*x = beq
        lb <= x <= ub
    """
    #converting to cvxopt-style matrices
    def _convert(H, f, Aeq, beq, lb, ub, x0):
        """ Convert everything to cvxopt-style matrices """
        P = cvxmat(H)
        print H[0]
        print P[0]
        raw_input()
        q = cvxmat(f)
        A = cvxmat(Aeq)
        b = cvxmat(beq)
        n = lb.size
        G = sparse([-speye(n),speye(n)])
        h = cvxmat(np.vstack([-lb, ub]))
        x0 = cvxmat(x0)
        return P, q, G, h, A, b, x0

    def speye(n):
        """Create a sparse identity matrix"""
        r = range(n)
        return spmatrix(1.0, r, r)

    P, q, G, h, A, b, x0 = _convert(H, f, Aeq, beq, lb, ub, x0)
    print P[0]
    raw_input()
    startqp = time.time()
    results = qp(P, q, G, h, A, b, x0)
    print results
    raw_input()
    
    elapsedqp = time.time()-startqp
    x_qpopt = results['x']
    if x_qpopt.shape == x_init.shape:
        qp_exit = 'optimal'
    else:
        qp_exit = ''
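For context, a self-contained CVXOPT sketch of the box-constraint encoding used in _convert above: stacking G = [-I; I] with h = [-lb; ub] expresses lb <= x <= ub. The tiny QP data are made up:

import numpy as np
from cvxopt import matrix as cvxmat, spmatrix, sparse
from cvxopt.solvers import qp

def speye(n):
    """Create a sparse identity matrix"""
    r = range(n)
    return spmatrix(1.0, r, r)

# minimize (1/2) x'Hx + f'x  subject to  lb <= x <= ub
H = cvxmat(np.array([[2.0, 0.0], [0.0, 2.0]]))
f = cvxmat(np.array([[-2.0], [-5.0]]))
lb = np.array([[0.0], [0.0]])
ub = np.array([[1.0], [1.0]])
n = 2

G = sparse([-speye(n), speye(n)])
h = cvxmat(np.vstack([-lb, ub]))
sol = qp(H, f, G, h)
print(sol['x'])  # the unconstrained minimum (1, 2.5) clipped to the box: (1, 1)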
Example #6
    def _initialize_mav_objective(self):
        """ Initialize the objective function to minimize the absolute value of
        the flux vector """

        self.objective_sx += (self.pvar.alpha_sx *
                              cs.fabs(self.var.p_sx).sum())
Example #7
    def calculate_consistent_state(self,
                                   model,
                                   time=0,
                                   y0_guess=None,
                                   inputs=None):
        """
        Calculate consistent state for the algebraic equations through
        root-finding

        Parameters
        ----------
        model : :class:`pybamm.BaseModel`
            The model for which to calculate initial conditions.
        time : float
            The time at which to calculate the states
        y0_guess : :class:`np.array`
            Guess for the rootfinding
        inputs : dict, optional
            Any input parameters to pass to the model when solving

        Returns
        -------
        y0_consistent : array-like, same shape as y0_guess
            Initial conditions that are consistent with the algebraic equations (roots
            of the algebraic equations)
        """
        pybamm.logger.info("Start calculating consistent states")
        if y0_guess is None:
            y0_guess = model.concatenated_initial_conditions.flatten()

        inputs = inputs or {}
        if model.convert_to_format == "casadi":
            inputs = casadi.vertcat(*[x for x in inputs.values()])

        # Split y0_guess into differential and algebraic
        len_rhs = model.rhs_eval(time, y0_guess, inputs).shape[0]
        y0_diff, y0_alg_guess = np.split(y0_guess, [len_rhs])

        # Solve using casadi or scipy
        if self.root_method == "casadi":
            # Set up
            p = casadi.MX.sym("p", inputs.shape[0])
            y_alg = casadi.MX.sym("y_alg", y0_alg_guess.shape[0])
            y = casadi.vertcat(y0_diff, y_alg)
            alg_root = model.casadi_algebraic(time, y, p)
            # Solve
            roots = casadi.rootfinder(
                "roots",
                "newton",
                dict(x=y_alg, p=p, g=alg_root),
                {"abstol": self.root_tol},
            )
            try:
                y0_alg = roots(y0_alg_guess, inputs).full().flatten()
                success = True
                message = None
                # Check final output
                fun = model.casadi_algebraic(time,
                                             casadi.vertcat(y0_diff, y0_alg),
                                             inputs)
                abs_fun = casadi.fabs(fun)
                max_fun = casadi.mmax(fun)
            except RuntimeError as err:
                success = False
                message = err.args[0]
                abs_fun = None
                max_fun = None
        else:
            algebraic = model.algebraic_eval
            jac = model.jac_algebraic_eval

            def root_fun(y0_alg):
                "Evaluates algebraic using y0_diff (fixed) and y0_alg (changed by algo)"
                y0 = np.concatenate([y0_diff, y0_alg])
                out = algebraic(time, y0, inputs)
                pybamm.logger.debug(
                    "Evaluating algebraic equations at t={}, L2-norm is {}".
                    format(time * model.timescale, np.linalg.norm(out)))
                return out

            if jac:
                if issparse(jac(0, y0_guess, inputs)):

                    def jac_fn(y0_alg):
                        """
                        Evaluates jacobian using y0_diff (fixed) and y0_alg (varying)
                        """
                        y0 = np.concatenate([y0_diff, y0_alg])
                        return jac(0, y0, inputs)[:, len_rhs:].toarray()

                else:

                    def jac_fn(y0_alg):
                        """
                        Evaluates jacobian using y0_diff (fixed) and y0_alg (varying)
                        """
                        y0 = np.concatenate([y0_diff, y0_alg])
                        return jac(0, y0, inputs)[:, len_rhs:]

            else:
                jac_fn = None
            # Find the values of y0_alg that are roots of the algebraic equations
            sol = optimize.root(
                root_fun,
                y0_alg_guess,
                jac=jac_fn,
                method=self.root_method,
                tol=self.root_tol,
            )
            pybamm.citations.register("virtanen2020scipy")

            # Set outputs
            y0_alg = sol.x
            success = sol.success
            fun = sol.fun
            abs_fun = np.abs(fun)
            max_fun = np.max(fun)
            message = sol.message

        if success and np.all(abs_fun < self.root_tol):
            # Return full set of consistent initial conditions (y0_diff unchanged)
            y0_consistent = np.concatenate([y0_diff, y0_alg])
            pybamm.logger.info(
                "Finish calculating consistent initial conditions")
            return y0_consistent
        elif not success:
            raise pybamm.SolverError(
                "Could not find consistent initial conditions: {}".format(
                    message))
        else:
            raise pybamm.SolverError("""
                Could not find consistent initial conditions: solver terminated
                successfully, but maximum solution error ({}) above tolerance ({})
                """.format(max_fun, self.root_tol))
Example #8
class SE3:
    '''
    Implementation of the mathematical group SE3, representing
    the 3D rigid body transformations
    '''

    group_shape = (4, 4)
    group_params = 12
    algebra_params = 6

    # coefficients
    x = ca.SX.sym('x')
    C1 = ca.Function('a', [x], [
        ca.if_else(ca.fabs(x) < eps, 1 - x**2 / 6 + x**4 / 120,
                   ca.sin(x) / x)
    ])
    C2 = ca.Function('b', [x], [
        ca.if_else(
            ca.fabs(x) < eps, 0.5 - x**2 / 24 + x**4 / 720,
            (1 - ca.cos(x)) / x**2)
    ])
    C3 = ca.Function('d', [x], [
        ca.if_else(
            ca.fabs(x) < eps, 0.5 + x**2 / 12 + 7 * x**4 / 720, x /
            (2 * ca.sin(x)))
    ])
    C4 = ca.Function('f', [x], [
        ca.if_else(
            ca.fabs(x) < eps, (1 / 6) - x**2 / 120 + x**4 / 5040,
            (1 - C1(x)) / (x**2))
    ])
    del x

    def __init__(self, SO3=None):
        if SO3 is None:
            self.SO3 = so3.Dcm()

    @classmethod
    def check_group_shape(cls, a):
        assert a.shape == cls.group_shape or a.shape == (cls.group_shape[0], )

    @classmethod
    def vee(cls, X):
        '''
        This takes in an element of the SE3 Lie Group and returns the se3 Lie Algebra elements 
        '''
        v = ca.SX(6, 1)
        v[0, 0] = X[2, 1]
        v[1, 0] = X[0, 2]
        v[2, 0] = X[1, 0]
        v[3, 0] = X[0, 3]
        v[4, 0] = X[1, 3]
        v[5, 0] = X[2, 3]
        return v

    @classmethod
    def wedge(cls, v):
        '''
        This takes in an element of the se3 Lie Algebra and returns the SE3 Lie Group elements
        '''
        X = ca.SX.zeros(4, 4)
        X[0, 1] = -v[2]
        X[0, 2] = v[1]
        X[1, 0] = v[2]
        X[1, 2] = -v[0]
        X[2, 0] = -v[1]
        X[2, 1] = v[0]
        X[0, 3] = v[3]
        X[1, 3] = v[4]
        X[2, 3] = v[5]
        return X

    @classmethod
    def product(cls, a, b):
        cls.check_group_shape(a)
        cls.check_group_shape(b)
        return ca.mtimes(a, b)

    @classmethod
    def inv(cls, a):
        # TODO
        cls.check_group_shape(a)
        return ca.transpose(a)

    @classmethod
    def exp(cls, v):
        v = cls.vee(v)
        v_so3 = v[:3]
        X_so3 = so3.wedge(v_so3)
        theta = ca.norm_2(so3.vee(X_so3))

        # translational components u
        u = ca.SX(3, 1)
        u[0, 0] = v[3]
        u[1, 0] = v[4]
        u[2, 0] = v[5]

        R = so3.Dcm.exp(v_so3)
        V = ca.SX.eye(3) + cls.C2(theta) * X_so3 + cls.C4(theta) * ca.mtimes(
            X_so3, X_so3)
        horz = ca.horzcat(R, ca.mtimes(V, u))

        lastRow = ca.horzcat(0, 0, 0, 1)

        return ca.vertcat(horz, lastRow)

    @classmethod
    def log(cls, G):
        theta = ca.arccos(((G[0, 0] + G[1, 1] + G[2, 2]) - 1) / 2)
        wSkew = so3.wedge(so3.Dcm.log(G[:3, :3]))
        V_inv = ca.SX.eye(3) - 0.5 * wSkew + (1 / (theta**2)) * (1 - (
            (cls.C1(theta)) / (2 * cls.C2(theta)))) * ca.mtimes(wSkew, wSkew)

        # t is translational component vector
        t = ca.SX(3, 1)
        t[0, 0] = G[0, 3]
        t[1, 0] = G[1, 3]
        t[2, 0] = G[2, 3]

        uInv = ca.mtimes(V_inv, t)
        horz2 = ca.horzcat(wSkew, uInv)
        lastRow2 = ca.horzcat(0, 0, 0, 0)

        return ca.vertcat(horz2, lastRow2)

    @classmethod
    def kinematics(cls, R, w):
        # TODO
        assert R.shape == (3, 3)
        assert w.shape == (3, 1)
        return ca.mtimes(R, cls.wedge(w))
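The C1...C4 coefficients above guard removable singularities: below eps they switch to a truncated Taylor series, otherwise they evaluate the closed form. A stand-alone sketch of that pattern for C1, assuming eps = 1e-7 (the class itself takes eps from module scope):

import casadi as ca

eps = 1e-7  # assumed threshold
x = ca.SX.sym('x')
C1 = ca.Function('C1', [x], [
    ca.if_else(ca.fabs(x) < eps, 1 - x**2 / 6 + x**4 / 120, ca.sin(x) / x)
])
print(float(C1(0.0)), float(C1(1e-3)))  # 1.0 and ~0.99999983; the series branch handles x = 0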
Example #9
def firefly_CLA_and_CDA_fuse_hybrid(  # TODO remove
    fuse_fineness_ratio,
    fuse_boattail_angle,
    fuse_TE_diameter,
    fuse_length,
    fuse_diameter,
    alpha,
    V,
    mach,
    rho,
    mu,
):
    """
    Estimated equiv. lift area and equiv. drag area of the Firefly fuselage, component buildup.
    :param fuse_fineness_ratio: Fineness ratio of the fuselage nose (length / diameter)
    :param fuse_boattail_angle: Boattail half-angle [deg]
    :param fuse_TE_diameter: Diameter of the fuselage's base at the "trailing edge" [m]
    :param fuse_length: Length of the fuselage [m]
    :param fuse_diameter: Diameter of the fuselage [m]
    :param alpha: Angle of attack [deg]
    :param V: Airspeed [m/s]
    :param mach: Mach number [unitless]
    :param rho: Air density [kg/m^3]
    :param mu: Dynamic viscosity of air [Pa*s]
    :return: A tuple of (CLA, CDA) [m^2]
    """
    alpha_rad = alpha * np.pi / 180
    sin_alpha = cas.sin(alpha_rad)
    cos_alpha = cas.cos(alpha_rad)
    """
    Lift of a truncated fuselage, following slender body theory.
    """
    separation_location = 0.3  # 0 for separation at trailing edge, 1 for separation at start of boat-tail. Calibrate this.
    diameter_at_separation = (
        1 - separation_location
    ) * fuse_TE_diameter + separation_location * fuse_diameter

    fuse_TE_area = np.pi / 4 * diameter_at_separation**2
    CLA_slender_body = 2 * fuse_TE_area * alpha_rad  # Derived from FVA 6.6.5, Eq. 6.75
    """
    Crossflow force, following the method of Jorgensen, 1977: "Prediction of Static Aero. Chars. for Slender..."
    """
    V_crossflow = V * sin_alpha
    Re_crossflow = rho * cas.fabs(V_crossflow) * fuse_diameter / mu
    mach_crossflow = mach * cas.fabs(sin_alpha)
    eta = 1  # TODO make this a function of overall fineness ratio
    Cdn = 0.2  # Taken from suggestion for supercritical cylinders in Hoerner's Fluid Dynamic Drag, pg. 3-11
    S_planform = fuse_diameter * fuse_length
    CNA = eta * Cdn * S_planform * sin_alpha * cas.fabs(sin_alpha)
    CLA_crossflow = CNA * cos_alpha
    CDA_crossflow = CNA * sin_alpha

    CLA = CLA_slender_body + CLA_crossflow
    """
    Zero-lift drag force
    Model derived from high-fidelity data at: C:\Projects\GitHub\firefly_aerodynamics\Design_Opt\studies\Circular Firefly Fuse CFD\Zero-Lift Drag
    """
    c = np.array([
        112.57153128951402720758778741583,
        -7.1720570832587240417410612280946,
        -0.01765596807595304351679033061373,
        0.0026135564778264172743071913629365,
        550.8775012129947299399645999074,
        3.3166868391027000129156476759817,
        11774.081980549422951298765838146,
        3073258.204571904614567756652832,
        0.0299,
    ])  # coefficients
    fuse_Re = rho * cas.fabs(V) * fuse_length / mu
    CDA_zero_lift = (
        (c[0] * cas.exp(c[1] * fuse_fineness_ratio) +
         c[2] * fuse_boattail_angle + c[3] * fuse_boattail_angle**2 +
         c[4] * fuse_TE_diameter**2 + c[5]) / c[6] *
        (fuse_Re / c[7])**(-1 / 7) * (fuse_length * fuse_diameter) / c[8])
    """
    Assumes a ring-shaped wake projection into the Trefftz plane with a sinusoidal circulation distribution.
    This results in a uniform grad(phi) within the ring in the Trefftz plane.
    Derivation at C:\Projects\GitHub\firefly_aerodynamics\Gists and Ideas\Ring Wing Potential Flow
    """
    fuse_oswalds_efficiency = 0.5
    CDiA_slender_body = CLA**2 / (
        diameter_at_separation**2 * np.pi *
        fuse_oswalds_efficiency) / 2  # or equivalently, use the next line
    # CDiA_slender_body = fuse_TE_area * alpha_rad ** 2 / 2

    CDA = CDA_crossflow + CDiA_slender_body + CDA_zero_lift

    return CLA, CDA
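A hedged usage sketch: calling the function above with purely illustrative flight conditions (none of these numbers are a validated Firefly case, and casadi/numpy are assumed to be imported as cas/np as in the snippet):

CLA, CDA = firefly_CLA_and_CDA_fuse_hybrid(
    fuse_fineness_ratio=3.0,   # nose length / diameter
    fuse_boattail_angle=10.0,  # deg
    fuse_TE_diameter=0.05,     # m
    fuse_length=0.8,           # m
    fuse_diameter=0.12,        # m
    alpha=4.0,                 # deg
    V=30.0,                    # m/s
    mach=0.09,
    rho=1.225,                 # kg/m^3
    mu=1.81e-5,                # Pa*s
)
print(float(CLA), float(CDA))  # equivalent lift and drag areas in m^2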
Example #10
    def test_length(self, element):
        # test against parent's implementation
        assert cas.fabs(element['obj'].length() -
                        element['base'].length()) < EPSILON
Example #11
def rocket_equations(jit=True):
    x = ca.SX.sym('x', 14)
    u = ca.SX.sym('u', 4)
    p = ca.SX.sym('p', 16)
    t = ca.SX.sym('t')
    dt = ca.SX.sym('dt')

    # State: x
    # body frame: Forward, Right, Down
    omega_b = x[0:3]  # inertial angular velocity expressed in body frame
    r_nb = x[3:7]  # modified rodrigues parameters
    v_b = x[7:10]  # inertial velocity expressed in body components
    p_n = x[10:13]  # position in nav frame
    m_fuel = x[13]  # mass

    # Input: u
    m_dot = ca.if_else(m_fuel > 0, u[0], 0)
    fin = u_to_fin(u)

    # Parameters: p
    g = p[0]  # gravity
    Jx = p[1]  # moment of inertia
    Jy = p[2]
    Jz = p[3]
    Jxz = p[4]
    ve = p[5]
    l_fin = p[6]
    w_fin = p[7]
    CL_alpha = p[8]
    CL0 = p[9]
    CD0 = p[10]
    K = p[11]
    s_fin = p[12]
    rho = p[13]
    m_empty = p[14]
    l_motor = p[15]

    # Calculations
    m = m_empty + m_fuel
    J_b = ca.SX.zeros(3, 3)
    J_b[0, 0] = Jx + m_fuel * l_motor**2
    J_b[1, 1] = Jy + m_fuel * l_motor**2
    J_b[2, 2] = Jz
    J_b[0, 2] = J_b[2, 0] = Jxz

    C_nb = so3.Dcm.from_mrp(r_nb)
    g_n = ca.vertcat(0, 0, g)
    v_n = ca.mtimes(C_nb, v_b)

    # aerodynamics
    VT = ca.norm_2(v_b)
    q = 0.5 * rho * ca.dot(v_b, v_b)
    fins = {
        'top': {
            'fwd': [1, 0, 0],
            'up': [0, 1, 0],
            'angle': fin[0]
        },
        'left': {
            'fwd': [1, 0, 0],
            'up': [0, 0, -1],
            'angle': fin[1]
        },
        'down': {
            'fwd': [1, 0, 0],
            'up': [0, -1, 0],
            'angle': fin[2]
        },
        'right': {
            'fwd': [1, 0, 0],
            'up': [0, 0, 1],
            'angle': fin[3]
        },
    }
    rel_wind_dir = v_b / VT

    # build fin lift/drag forces
    vel_tol = 1e-3
    FA_b = ca.vertcat(0, 0, 0)
    MA_b = ca.vertcat(0, 0, 0)
    for key, data in fins.items():
        fwd = data['fwd']
        up = data['up']
        angle = data['angle']
        U = ca.dot(fwd, v_b)
        W = ca.dot(up, v_b)
        side = ca.cross(fwd, up)
        alpha = ca.if_else(ca.fabs(U) > vel_tol, -ca.atan(W / U), 0)
        perp_wind_dir = ca.cross(side, rel_wind_dir)
        norm_perp = ca.norm_2(perp_wind_dir)
        perp_wind_dir = ca.if_else(
            ca.fabs(norm_perp) > vel_tol, perp_wind_dir / norm_perp, up)
        CL = CL0 + CL_alpha * (alpha + angle)
        CD = CD0 + K * (CL - CL0)**2
        # model stall as no lift if above 23 deg.
        L = ca.if_else(ca.fabs(alpha) < 0.4, CL * q * s_fin, 0)
        D = CD * q * s_fin
        FAi_b = L * perp_wind_dir - D * rel_wind_dir
        FA_b += FAi_b
        MA_b += ca.cross(-l_fin * fwd - w_fin * side, FAi_b)

    FA_b = ca.if_else(ca.fabs(VT) > vel_tol, FA_b, ca.SX.zeros(3))
    MA_b = ca.if_else(ca.fabs(VT) > vel_tol, MA_b, ca.SX.zeros(3))

    # propulsion
    FP_b = ca.vertcat(m_dot * ve, 0, 0)

    # total force and moment
    F_b = FA_b + FP_b + ca.mtimes(C_nb.T, m * g_n)
    M_b = MA_b

    force_moment = ca.Function('force_moment', [x, u, p], [F_b, M_b],
                               ['x', 'u', 'p'], ['F_b', 'M_b'])

    # right hand side
    rhs = ca.Function('rhs', [x, u, p], [
        ca.vertcat(
            ca.mtimes(ca.inv(J_b),
                      M_b - ca.cross(omega_b, ca.mtimes(J_b, omega_b))),
            so3.Mrp.kinematics(r_nb, omega_b),
            F_b / m - ca.cross(omega_b, v_b), ca.mtimes(C_nb, v_b), -m_dot)
    ], ['x', 'u', 'p'], ['rhs'], {'jit': jit})

    # prediction
    t0 = ca.SX.sym('t0')
    h = ca.SX.sym('h')
    x0 = ca.SX.sym('x', 14)
    x1 = rk4(lambda t, x: rhs(x, u, p), t0, x0, h)
    x1[3:7] = so3.Mrp.shadow_if_necessary(x1[3:7])
    predict = ca.Function('predict', [x0, u, p, t0, h], [x1], {'jit': jit})

    def schedule(t, start, ty_pairs):
        val = start
        for ti, yi in ty_pairs:
            val = ca.if_else(t > ti, yi, val)
        return val

    # reference trajectory
    pitch_d = 1.0

    euler = so3.Euler.from_mrp(r_nb)  # roll, pitch, yaw
    pitch = euler[1]

    # control
    u_control = ca.SX.zeros(4)
    # these controls are just test controls to make sure the fins are working

    u_control[0] = 0.1  # mass flow rate

    import control
    s = control.tf([1, 0], [0, 1])
    H = 140 * (s + 50) * (s + 50) / (s * (2138 * s + 208.8))
    Hd = control.tf2ss(control.c2d(H, 0.01))

    theta_c = (100 - p_n[2]) * (0.01) / (v_b[2] * ca.cos(p_n[2]))

    x_elev = ca.SX.sym('x_elev', 2)
    u_elev = ca.SX.sym('u_elev', 1)
    x_1 = ca.mtimes(Hd.A, x_elev) + ca.mtimes(Hd.B, u_elev)
    y_elev = ca.mtimes(Hd.C, x_elev) + ca.mtimes(Hd.D, u_elev)

    elev_c = (theta_c - p_n[2]) * y_elev / (0.5 * rho * v_b[2]**2)

    u_control[0] = 0.1  # mass flow rate
    u_control[1] = 0
    u_control[2] = elev_c
    u_control[3] = 0

    control = ca.Function('control', [x, p, t, dt], [u_control],
                          ['x', 'p', 't', 'dt'], ['u'])

    # initialize
    pitch_deg = ca.SX.sym('pitch_deg')
    omega0_b = ca.vertcat(0, 0, 0)
    r0_nb = so3.Mrp.from_euler(ca.vertcat(0, pitch_deg * ca.pi / 180, 0))
    v0_b = ca.vertcat(0, 0, 0)
    p0_n = ca.vertcat(0, 0, 0)
    m0_fuel = 0.8
    # x: omega_b, r_nb, v_b, p_n, m_fuel
    x0 = ca.vertcat(omega0_b, r0_nb, v0_b, p0_n, m0_fuel)
    #     g, Jx, Jy, Jz, Jxz, ve, l_fin, w_fin, CL_alpha, CL0, CD0, K, s, rho, m_empty, l_motor
    p0 = [
        9.8, 0.05, 1.0, 1.0, 0.0, 350, 1.0, 0.05, 2 * np.pi, 0, 0.01, 0.01,
        0.05, 1.225, 0.2, 1.0
    ]
    initialize = ca.Function('initialize', [pitch_deg], [x0, p0])

    return {
        'rhs': rhs,
        'predict': predict,
        'control': control,
        'initialize': initialize,
        'force_moment': force_moment,
        'x': x,
        'u': u,
        'p': p
    }
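rk4 is referenced but not shown above; a minimal sketch compatible with the call rk4(lambda t, x: rhs(x, u, p), t0, x0, h) is a single classical fourth-order Runge-Kutta step, which also works on CasADi expressions:

def rk4(f, t0, x0, h):
    """One classical Runge-Kutta 4 step of x' = f(t, x) from t0 with step h."""
    k1 = f(t0, x0)
    k2 = f(t0 + h / 2, x0 + h / 2 * k1)
    k3 = f(t0 + h / 2, x0 + h / 2 * k2)
    k4 = f(t0 + h, x0 + h * k3)
    return x0 + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4)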
Example #12
    def runSolver(self, U, trajTrue=None):
        # make sure all bounds are set
        (xMissing, pMissing) = self._guessMap.getMissing()
        msg = []
        for name in xMissing:
            msg.append("you forgot to set a guess for \"" + name +
                       "\" at timesteps: " + str(xMissing[name]))
        for name in pMissing:
            msg.append("you forgot to set a guess for \"" + name + "\"")
        if len(msg) > 0:
            raise ValueError('\n'.join(msg))

        lbx, ubx = zip(*(self._boundMap.vectorize()))
        xk = C.DMatrix(list(self._guessMap.vectorize()))

        for k in range(100):
            ############# plot stuff ###############
            print "iteration: ", k
            #            import nmheMaps
            #            xOpt = np.array(xk).squeeze()
            #            traj = nmheMaps.VectorizedReadOnlyNmheMap(self.dae,self.nk,xOpt)
            #
            #            xsT =  np.array([trajTrue.lookup('x',timestep=kk) for kk in range(self.nk+1)] )
            #            ysT =  np.array([trajTrue.lookup('y',timestep=kk) for kk in range(self.nk+1)] )
            #            zsT =  np.array([trajTrue.lookup('z',timestep=kk) for kk in range(self.nk+1)] )
            #
            #            xs =  np.array([traj.lookup('x',timestep=kk) for kk in range(self.nk+1)] )
            #            ys =  np.array([traj.lookup('y',timestep=kk) for kk in range(self.nk+1)] )
            #            zs =  np.array([traj.lookup('z',timestep=kk) for kk in range(self.nk+1)] )
            #
            #            outputMap = nmheMaps.NmheOutputMap(self._outputMapGenerator, xOpt, U)
            #            c = np.array([outputMap.lookup('c',timestep=kk) for kk in range(self.nk)])
            #            cdot = np.array([outputMap.lookup('cdot',timestep=kk) for kk in range(self.nk)])
            #
            #            figure()
            #            title(str(float(k)))
            #            subplot(3,2,1)
            #            plot(xs)
            #            plot(xsT)
            #            ylabel('x '+str(k))
            #
            #            subplot(3,2,3)
            #            plot(ys)
            #            plot(ysT)
            #            ylabel('y '+str(k))
            #
            #            subplot(3,2,5)
            #            plot(zs)
            #            plot(zsT)
            #            ylabel('z '+str(k))
            #
            ##            subplot(2,2,2)
            ##            plot(dxs,-dzs)
            ##            ylabel('vel')
            ##            axis('equal')
            #
            #            subplot(3,2,2)
            #            plot(c)
            #            ylabel('c')
            #
            #            subplot(3,2,4)
            #            plot(cdot)
            #            ylabel('cdot')
            #            ##########################################

            self.masterFun.setInput(xk, 0)
            self.masterFun.setInput(U, 1)
            t0 = time.time()
            try:
                self.masterFun.evaluate()
            except RuntimeError as e:
                print "ERRRRRRRRRRRRROR"
                show()
                raise e

            t1 = time.time()
            masterFunTime = (t1 - t0) * 1000
            hessL = self.masterFun.output(0)
            gradF = self.masterFun.output(1)
            g = self.masterFun.output(2)
            jacobG = self.masterFun.output(3)
            f = self.masterFun.output(4)

            self.qp.setInput(0, C.QP_X_INIT)
            self.qp.setInput(hessL, C.QP_H)
            self.qp.setInput(jacobG, C.QP_A)
            self.qp.setInput(gradF, C.QP_G)

            assert all((lbx - xk) <= 0), "lower bounds violation"
            assert all((ubx - xk) >= 0), "upper bounds violation"
            self.qp.setInput(lbx - xk, C.QP_LBX)
            self.qp.setInput(ubx - xk, C.QP_UBX)

            self.qp.setInput(self.glb - g, C.QP_LBA)
            self.qp.setInput(self.gub - g, C.QP_UBA)

            t0 = time.time()
            self.qp.evaluate()
            t1 = time.time()

            #            print "gradF: ",gradF
            #            print 'dim(jacobG): "gra
            #            print "rank: ",np.linalg.matrix_rank(jacobG)
            print "masterFun delta time: %.3f ms" % masterFunTime
            print "f: ", f, '\tmax constraint: ', max(C.fabs(g))
            print "qp delta time: %.3f ms" % ((t1 - t0) * 1000)
            print ""
            deltaX = self.qp.output(C.QP_PRIMAL)

            #            import scipy.io
            #            scipy.io.savemat('hessL.mat',{'hessL':np.array(hessL),
            #                                          'gradF':np.array(gradF),
            #                                          'x0':0*np.array(deltaX),
            #                                          'xopt':np.array(deltaX),
            #                                          'lbx':np.array(lbx-xk),
            #                                          'ubx':np.array(ubx-xk),
            #                                          'jacobG':np.array(jacobG),
            #                                          'lba':np.array(self.glb-g),
            #                                          'uba':np.array(self.gub-g)})
            #            import sys; sys.exit()

            #            print deltaX
            xk += deltaX
Example #13
    def collocation_solver_setup(self, warmstart=False):
        """
        Sets up NLP for collocation solution. Constructs initial guess
        arrays, constructs constraint and objective functions, and
        otherwise passes arguments to the correct places. This looks
        really inefficient and is likely unnecessary to run multiple
        times for repeated runs with new data. Not sure how much time it
        takes compared to the NLP solution.
        Run immediately before CollocationSolve.
        """
        
        # Dimensions of the problem
        nx    = self.NVAR # total number of states
        ndiff = nx        # number of differential states
        nalg  = 0         # number of algebraic states
        nu    = 0         # number of controls

        # Collocated variables
        NXD = self.nk*(self.deg+1)*ndiff # differential states 
        NXA = self.nk*self.deg*nalg      # algebraic states
        NU  = self.nk*nu                 # Parametrized controls
        NV  = NXD+NXA+NU+self.ParamCount+self.NMP # Total variables
        self.NV = NV

        # NLP variable vector
        V = cs.msym("V",NV)
          
        # All variables with bounds and initial guess
        vars_lb   = np.zeros(NV)
        vars_ub   = np.zeros(NV)
        vars_init = np.zeros(NV)
        offset    = 0
        
        ## I AM HERE ##
        
        
        #
        # Split NLP vector into useable slices
        #
        # Get the parameters
        P = V[offset:offset+self.ParamCount]
        vars_init[offset:offset+self.ParamCount] = self.NLPdata['p_init']
        vars_lb[offset:offset+self.ParamCount]   = self.NLPdata['p_min']
        vars_ub[offset:offset+self.ParamCount]   = self.NLPdata['p_max']

        # Initial conditions for measurement adjustment
        MP = V[self.NV-self.NMP:]
        vars_init[self.NV-self.NMP:] = np.ones(self.NMP)
        vars_lb[self.NV-self.NMP:] = 0.1*np.ones(self.NMP) 
        vars_ub[self.NV-self.NMP:] = 10*np.ones(self.NMP)


        pdb.set_trace()
        
        offset += self.np # indexing variable

        # Get collocated states and parametrized control
        XD = np.resize(np.array([], dtype=cs.MX), (self.nk, self.points_per_interval,
                                                   self.deg+1)) 
        # NB: same name as above
        XA = np.resize(np.array([],dtype=cs.MX),(self.nk,self.points_per_interval,self.deg)) 
        # NB: same name as above
        U = np.resize(np.array([],dtype=cs.MX),self.nk)

        # Prepare the starting data matrix vars_init, vars_ub, and
        # vars_lb, by looping over finite elements, states, etc. Also
        # groups the variables in the large unknown vector V into XD and
        # XA(unused) for later indexing
        for k in range(self.nk):  
            # Collocated states
            for i in range(self.points_per_interval):
                #
                for j in range(self.deg+1):
                              
                    # Get the expression for the state vector
                    XD[k][i][j] = V[offset:offset+ndiff]
                    if j !=0:
                        XA[k][i][j-1] = V[offset+ndiff:offset+ndiff+nalg]
                    # Add the initial condition
                    index = (self.deg+1)*(self.points_per_interval*k+i) + j
                    if k==0 and j==0 and i==0:
                        vars_init[offset:offset+ndiff] = \
                            self.NLPdata['xD_init'][index,:]
                        
                        vars_lb[offset:offset+ndiff] = \
                                self.NLPdata['xDi_min']
                        vars_ub[offset:offset+ndiff] = \
                                self.NLPdata['xDi_max']
                        offset += ndiff
                    else:
                        if j!=0:
                            vars_init[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_init'][index,:],
                                      self.NLPdata['xA_init'][index,:])
                            
                            vars_lb[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_min'],
                                      self.NLPdata['xA_min'])

                            vars_ub[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_max'],
                                      self.NLPdata['xA_max'])

                            offset += nx
                        else:
                            vars_init[offset:offset+ndiff] = \
                                    self.NLPdata['xD_init'][index,:]

                            vars_lb[offset:offset+ndiff] = \
                                    self.NLPdata['xD_min']

                            vars_ub[offset:offset+ndiff] = \
                                    self.NLPdata['xD_max']

                            offset += ndiff
            
            # Parametrized controls (unused here)
            U[k] = V[offset:offset+nu]

        # Attach these initial conditions to external dictionary
        self.NLPdata['v_init'] = vars_init
        self.NLPdata['v_ub'] = vars_ub
        self.NLPdata['v_lb'] = vars_lb

        # Setting up the constraint function for the NLP. Over each
        # collocated state, ensure continuity and system dynamics
        g = []
        lbg = []
        ubg = []

        # For all finite elements
        for k in range(self.nk):
            for i in range(self.points_per_interval):
                # For all collocation points
                for j in range(1,self.deg+1):   		
                    # Get an expression for the state derivative
                    # at the collocation point
                    xp_jk = 0
                    for j2 in range (self.deg+1):
                        # get the time derivative of the differential
                        # states (eq 10.19b)
                        xp_jk += self.C[j2][j]*XD[k][i][j2]
                    
                    # Add collocation equations to the NLP
                    [fk] = self.rfmod.call([0., xp_jk/self.h,
                                            XD[k][i][j], XA[k][i][j-1],
                                            U[k], P])
                    
                    # impose system dynamics (for the differential
                    # states (eq 10.19b))
                    g += [fk[:ndiff]]
                    lbg.append(np.zeros(ndiff)) # equality constraints
                    ubg.append(np.zeros(ndiff)) # equality constraints

                    # impose system dynamics (for the algebraic states
                    # (eq 10.19b)) (unused)
                    g += [fk[ndiff:]]                               
                    lbg.append(np.zeros(nalg)) # equality constraints
                    ubg.append(np.zeros(nalg)) # equality constraints
                    
                # Get an expression for the state at the end of the finite
                # element
                xf_k = 0
                for j in range(self.deg+1):
                    xf_k += self.D[j]*XD[k][i][j]
                    
                # if i==self.points_per_interval-1:

                # Add continuity equation to NLP
                if k+1 != self.nk: # End = Beginning of next
                    g += [XD[k+1][0][0] - xf_k]
                    lbg.append(-self.NLPdata['CONtol']*np.ones(ndiff))
                    ubg.append(self.NLPdata['CONtol']*np.ones(ndiff))
                
                else: # At the last segment
                    # Periodicity constraints (only for NEQ)
                    g += [XD[0][0][0][:self.neq] - xf_k[:self.neq]]
                    lbg.append(-self.NLPdata['CONtol']*np.ones(self.neq))
                    ubg.append(self.NLPdata['CONtol']*np.ones(self.neq))


                # else:
                #     g += [XD[k][i+1][0] - xf_k]
                
        # Flatten constraint arrays for last addition
        lbg = np.concatenate(lbg).tolist()
        ubg = np.concatenate(ubg).tolist()

        # Constraint to protect against fixed point solutions
        if self.NLPdata['FPgaurd'] is True:
            fout = self.model.call(cs.daeIn(t=self.tgrid[0],
                                            x=XD[0,0,0][:self.neq],
                                               p=V[:self.np]))[0]
            g += [cs.MX(cs.sumAll(fout**2))]
            lbg.append(np.array(self.NLPdata['FPTOL']))
            ubg.append(np.array(cs.inf))

        elif self.NLPdata['FPgaurd'] == 'all':
            fout = self.model.call(cs.daeIn(t=self.tgrid[0],
                                            x=XD[0,0,0][:self.neq],
                                               p=V[:self.np]))[0]
            g += [cs.MX(fout**2)]
            lbg += [self.NLPdata['FPTOL']]*self.neq
            ubg += [cs.inf]*self.neq



        # Nonlinear constraint function
        gfcn = cs.MXFunction([V],[cs.vertcat(g)])


        # Get Linear Interpolant for YDATA from TDATA
        objlist = []
        # xarr = np.array([V[self.np:][i] for i in \
        #         xrange(self.neq*self.nk*(self.deg+1))])
        # xarr = xarr.reshape([self.nk,self.deg+1,self.neq])
        
        # List of the times when each finite element starts
        felist = self.tgrid.reshape((self.nk,self.deg+1))[:,0]
        felist = np.hstack([felist, self.tgrid[-1]])

        def z(tau, zs):
            """
            Function to calculate the interpolated values of z^K at a
            given tau (0->1). zs is a matrix with the symbolic state
            values within the finite element
            """

            def l(j,t):
                """
                Intermediate values for polynomial interpolation
                """
                tau = self.NLPdata['collocation_points']\
                        [self.NLPdata['cp']][self.deg]
                return np.prod(np.array([ 
                        (t - tau[k])/(tau[j] - tau[k]) 
                        for k in xrange(0,self.deg+1) if k is not j]))

            
            interp_vector = []
            for i in xrange(self.neq): # only state variables
                interp_vector += [np.sum(np.array([l(j, tau)*zs[j][i]
                                  for j in xrange(0, self.deg+1)]))]
            return interp_vector

        # Set up Objective Function by minimizing distance from
        # Collocation solution to Measurement Data

        # Number of measurement functions
        for i in xrange(self.NM):

            # Number of sampling points per measurement
            for j in xrange(len(self.tdata[i])):

                # the interpolating polynomial wants a tau value,
                # where 0 < tau < 1, distance along current element.
                
                # Get the index for which finite element of the tdata 
                # values
                feind = get_ind(self.tdata[i][j],felist)[0]

                # Get the starting time and tau (0->1) for the tdata
                taustart = felist[feind]
                tau = (self.tdata[i][j] - taustart)*(self.nk+1)/self.tf

                x_interp = z(tau, XD[feind][0])
                # Broken in newest numpy version, likely need to redo
                # this whole file with most recent versions
                y_model = self.Amatrix[i].dot(x_interp)

                # Add measurement scaling
                if self.mpars[i]: y_model *= MP[sum(self.mpars[:i])]

                # Using relative diff's is probably messing up weights
                # in Identifiability
                diff = (y_model - self.ydata[i][j])

                if   self.NLPdata['ObjMethod'] == 'lsq':
                    dist = (diff**2/self.edata[i][j]**2)

                elif self.NLPdata['ObjMethod'] == 'laplace':
                    dist = cs.fabs(diff)/np.sqrt(self.edata[i][j])
                
                try: dist *= self.NLPdata['state_weights'][i]
                except KeyError: pass
                    
                objlist += [dist]

        # Minimization Objective
        if self.minimize_f:
            # Function integral
            f_integral = 0
            # For each finite element
            for i in xrange(self.nk):
                # For each collocation point
                fvals = []
                for j in xrange(self.deg+1):
                    fvals += [self.minimize_f(XD[i][0][j], P)]
                # integrate with weights
                f_integral += sum([fvals[k] * self.E[k] for k in
                                   xrange(self.deg+1)])

            objlist += [self.NLPdata['f_minimize_weight']*f_integral]



        # Stability Objective (Floquet Multipliers)
        if self.monodromy:
            s_final = XD[-1,-1,-1][-self.neq**2:]
            s_final = s_final.reshape((self.neq,self.neq))
            trace = sum([s_final[i,i] for i in xrange(self.neq)])
            objlist += [self.NLPdata['stability']*(trace - 1)**2]




        # Objective function of the NLP
        obj = cs.sumAll(cs.vertcat(objlist))
        ofcn = cs.MXFunction([V], [obj])

        self.CollocationSolver = cs.IpoptSolver(ofcn,gfcn)

        for opt,val in self.IpoptOpts.iteritems():
            self.CollocationSolver.setOption(opt,val)

        self.CollocationSolver.setOption('obj_scaling_factor',
                                         len(vars_init))
        
        if warmstart:
            self.CollocationSolver.setOption('warm_start_init_point','yes')
        
        # initialize the self.CollocationSolver
        self.CollocationSolver.init()
          
        # Initial condition
        self.CollocationSolver.setInput(vars_init,cs.NLP_X_INIT)

        # Bounds on x
        self.CollocationSolver.setInput(vars_lb,cs.NLP_LBX)
        self.CollocationSolver.setInput(vars_ub,cs.NLP_UBX)

        # Bounds on g
        self.CollocationSolver.setInput(np.array(lbg),cs.NLP_LBG)
        self.CollocationSolver.setInput(np.array(ubg),cs.NLP_UBG)

        if warmstart:
            self.CollocationSolver.setInput( \
                    self.WarmStartData['NLP_X_OPT'],cs.NLP_X_INIT)
            self.CollocationSolver.setInput( \
                    self.WarmStartData['NLP_LAMBDA_G'],cs.NLP_LAMBDA_INIT)
            self.CollocationSolver.setOutput( \
                    self.WarmStartData['NLP_LAMBDA_X'],cs.NLP_LAMBDA_X)
                    
        
        pdb.set_trace()
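The helper l(j, t) above is a Lagrange basis polynomial. A stand-alone NumPy check of its defining property (l_j(tau_k) is 1 for j == k and 0 otherwise), on illustrative collocation points:

import numpy as np

tau = np.array([0.0, 0.155, 0.645, 1.0])  # made-up collocation points on [0, 1]

def l(j, t):
    # Lagrange basis polynomial j evaluated at t
    return np.prod([(t - tau[k]) / (tau[j] - tau[k])
                    for k in range(len(tau)) if k != j])

print(l(2, tau[2]), l(2, tau[0]))  # 1.0 and 0.0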
Example #14
class Dcm:

    x = ca.SX.sym('x')
    C1 = ca.Function('a', [x], [
        ca.if_else(ca.fabs(x) < eps, 1 - x**2 / 6 + x**4 / 120,
                   ca.sin(x) / x)
    ])
    C2 = ca.Function('b', [x], [
        ca.if_else(
            ca.fabs(x) < eps, 0.5 - x**2 / 24 + x**4 / 720,
            (1 - ca.cos(x)) / x**2)
    ])
    C3 = ca.Function('d', [x], [
        ca.if_else(
            ca.fabs(x) < eps, 0.5 + x**2 / 12 + 7 * x**4 / 720, x /
            (2 * ca.sin(x)))
    ])
    del x

    group_shape = (3, 3)
    group_params = 9
    algebra_params = 3

    def __init__(self):
        raise RuntimeError(
            'this class is just for scoping, do not instantiate')

    @classmethod
    def check_group_shape(cls, a):
        assert a.shape == cls.group_shape or a.shape == (cls.group_shape[0], )

    @classmethod
    def product(cls, a, b):
        cls.check_group_shape(a)
        cls.check_group_shape(b)
        return ca.mtimes(a, b)

    @classmethod
    def inv(cls, a):
        cls.check_group_shape(a)
        return ca.transpose(a)

    @classmethod
    def exp(cls, v):
        theta = ca.norm_2(v)
        X = wedge(v)
        return ca.SX.eye(3) + cls.C1(theta) * X + cls.C2(theta) * ca.mtimes(
            X, X)

    @classmethod
    def log(cls, R):
        theta = ca.arccos((ca.trace(R) - 1) / 2)
        return vee(cls.C3(theta) * (R - R.T))

    @classmethod
    def kinematics(cls, R, w):
        assert R.shape == (3, 3)
        assert w.shape == (3, 1)
        return ca.mtimes(R, wedge(w))

    @classmethod
    def from_quat(cls, q):
        assert q.shape == (4, 1)
        R = ca.SX(3, 3)
        a = q[0]
        b = q[1]
        c = q[2]
        d = q[3]
        aa = a * a
        ab = a * b
        ac = a * c
        ad = a * d
        bb = b * b
        bc = b * c
        bd = b * d
        cc = c * c
        cd = c * d
        dd = d * d
        R[0, 0] = aa + bb - cc - dd
        R[0, 1] = 2 * (bc - ad)
        R[0, 2] = 2 * (bd + ac)
        R[1, 0] = 2 * (bc + ad)
        R[1, 1] = aa + cc - bb - dd
        R[1, 2] = 2 * (cd - ab)
        R[2, 0] = 2 * (bd - ac)
        R[2, 1] = 2 * (cd + ab)
        R[2, 2] = aa + dd - bb - cc
        return R

    @classmethod
    def from_mrp(cls, r):
        assert r.shape == (4, 1)
        a = r[:3]
        X = wedge(a)
        n_sq = ca.dot(a, a)
        X_sq = ca.mtimes(X, X)
        R = ca.SX.eye(3) + (8 * X_sq - 4 * (1 - n_sq) * X) / (1 + n_sq)**2
        # return transpose, due to convention difference in book
        return R.T

    @classmethod
    def from_euler(cls, e):
        return cls.from_quat(Quat.from_euler(e))
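A NumPy transcription of the from_quat element formulas above, used only to sanity-check that the resulting matrix is orthonormal for a sample unit quaternion (not part of the class):

import numpy as np

def dcm_from_quat(q):
    # same element-wise formula as Dcm.from_quat, with plain floats
    a, b, c, d = q
    return np.array([
        [a*a + b*b - c*c - d*d, 2*(b*c - a*d),         2*(b*d + a*c)],
        [2*(b*c + a*d),         a*a + c*c - b*b - d*d, 2*(c*d - a*b)],
        [2*(b*d - a*c),         2*(c*d + a*b),         a*a + d*d - b*b - c*c],
    ])

q = np.array([np.cos(0.1), np.sin(0.1), 0.0, 0.0])  # unit quaternion: rotation about x
R = dcm_from_quat(q)
print(np.allclose(R @ R.T, np.eye(3)))  # True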
Example #15
    def _initialize_mav_objective(self):
        """ Initialize the objective function to minimize the absolute value of
        the parameter vector """

        self.objective_sx += (self.pvar.alpha_sx[:] *
                              cs.sum1(cs.fabs(self.var.p_sx[:])))
Example #16
    def _integrate(self, model, t_eval, inputs=None):
        """
        Calculate the solution of the algebraic equations through root-finding

        Parameters
        ----------
        model : :class:`pybamm.BaseModel`
            The model whose solution to calculate.
        t_eval : :class:`numpy.array`, size (k,)
            The times at which to compute the solution
        inputs : dict, optional
            Any input parameters to pass to the model when solving. If any input
            parameters that are present in the model are missing from "inputs", then
            the solution will consist of `ProcessedSymbolicVariable` objects, which must
            be provided with inputs to obtain their value.
        """
        # Record whether there are any symbolic inputs
        inputs = inputs or {}
        has_symbolic_inputs = any(isinstance(v, casadi.MX) for v in inputs.values())

        # Create casadi objects for the root-finder
        inputs = casadi.vertcat(*[x for x in inputs.values()])

        y0 = model.y0
        # The casadi algebraic solver can read rhs equations, but leaves them unchanged
        # i.e. the part of the solution vector that corresponds to the differential
        # equations will be equal to the initial condition provided. This allows this
        # solver to be used for initialising the DAE solvers
        if model.rhs == {}:
            len_rhs = 0
            y0_diff = casadi.DM()
            y0_alg = y0
        else:
            len_rhs = model.concatenated_rhs.size
            y0_diff = y0[:len_rhs]
            y0_alg = y0[len_rhs:]

        y_alg = None

        # Set up
        t_sym = casadi.MX.sym("t")
        y_alg_sym = casadi.MX.sym("y_alg", y0_alg.shape[0])
        y_sym = casadi.vertcat(y0_diff, y_alg_sym)
        p_sym = casadi.MX.sym("p", inputs.shape[0])

        t_p_sym = casadi.vertcat(t_sym, p_sym)
        alg = model.casadi_algebraic(t_sym, y_sym, p_sym)

        # Set constraints vector in the casadi format
        # Constrain the unknowns. 0 (default): no constraint on ui, 1: ui >= 0.0,
        # -1: ui <= 0.0, 2: ui > 0.0, -2: ui < 0.0.
        constraints = np.zeros_like(model.bounds[0], dtype=int)
        # If the lower bound is positive then the variable must always be positive
        constraints[model.bounds[0] >= 0] = 1
        # If the upper bound is negative then the variable must always be negative
        constraints[model.bounds[1] <= 0] = -1

        # Set up rootfinder
        roots = casadi.rootfinder(
            "roots",
            "newton",
            dict(x=y_alg_sym, p=t_p_sym, g=alg),
            {
                **self.extra_options,
                "abstol": self.tol,
                "constraints": list(constraints[len_rhs:]),
            },
        )
        for idx, t in enumerate(t_eval):
            # Evaluate algebraic with new t and previous y0, if it's already close
            # enough then keep it
            # We can't do this if there are symbolic inputs
            if has_symbolic_inputs is False and np.all(
                abs(model.casadi_algebraic(t, y0, inputs).full()) < self.tol
            ):
                pybamm.logger.debug(
                    "Keeping same solution at t={}".format(t * model.timescale_eval)
                )
                if y_alg is None:
                    y_alg = y0_alg
                else:
                    y_alg = casadi.horzcat(y_alg, y0_alg)
            # Otherwise calculate new y_sol
            else:
                t_inputs = casadi.vertcat(t, inputs)
                # Solve
                try:
                    y_alg_sol = roots(y0_alg, t_inputs)
                    success = True
                    message = None
                    # Check final output
                    y_sol = casadi.vertcat(y0_diff, y_alg_sol)
                    fun = model.casadi_algebraic(t, y_sol, inputs)
                except RuntimeError as err:
                    success = False
                    message = err.args[0]
                    fun = None

                # If there are no symbolic inputs, check the function is below the tol
                # Skip this check if there are symbolic inputs
                if success and (
                    has_symbolic_inputs is True or np.all(casadi.fabs(fun) < self.tol)
                ):
                    # update initial guess for the next iteration
                    y0_alg = y_alg_sol
                    # update solution array
                    if y_alg is None:
                        y_alg = y_alg_sol
                    else:
                        y_alg = casadi.horzcat(y_alg, y_alg_sol)
                elif not success:
                    raise pybamm.SolverError(
                        "Could not find acceptable solution: {}".format(message)
                    )
                else:
                    raise pybamm.SolverError(
                        """
                        Could not find acceptable solution: solver terminated
                        successfully, but maximum solution error ({})
                        above tolerance ({})
                        """.format(
                            casadi.mmax(casadi.fabs(fun)), self.tol
                        )
                    )

        # Concatenate differential part
        y_diff = casadi.horzcat(*[y0_diff] * len(t_eval))
        y_sol = casadi.vertcat(y_diff, y_alg)
        # Return solution object (no events, so pass None to t_event, y_event)
        return pybamm.Solution(t_eval, y_sol, termination="success")
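A tiny NumPy illustration of how the Newton rootfinder's constraints vector is derived from the variable bounds above (1 marks variables kept non-negative, -1 non-positive; the bounds are made up):

import numpy as np

lower = np.array([0.0, -np.inf, -5.0])
upper = np.array([np.inf, 0.0, 5.0])

constraints = np.zeros_like(lower, dtype=int)
constraints[lower >= 0] = 1    # lower bound >= 0: variable stays non-negative
constraints[upper <= 0] = -1   # upper bound <= 0: variable stays non-positive
print(constraints)             # [ 1 -1  0]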
Example #17
    def __init__(self, **kwargs):
        # Check arguments
        assert ('model_folder' in kwargs)

        # Log pymoca version
        logger.debug("Using pymoca {}.".format(pymoca.__version__))

        # Transfer model from the Modelica .mo file to CasADi using pymoca
        if 'model_name' in kwargs:
            model_name = kwargs['model_name']
        else:
            if hasattr(self, 'model_name'):
                model_name = self.model_name
            else:
                model_name = self.__class__.__name__

        # Load model from pymoca backend
        self.__pymoca_model = pymoca.backends.casadi.api.transfer_model(
            kwargs['model_folder'], model_name, self.compiler_options())

        # Extract the CasADi MX variables used in the model
        self.__mx = {}
        self.__mx['time'] = [self.__pymoca_model.time]
        self.__mx['states'] = [v.symbol for v in self.__pymoca_model.states]
        self.__mx['derivatives'] = [
            v.symbol for v in self.__pymoca_model.der_states
        ]
        self.__mx['algebraics'] = [
            v.symbol for v in self.__pymoca_model.alg_states
        ]
        self.__mx['parameters'] = [
            v.symbol for v in self.__pymoca_model.parameters
        ]
        self.__mx['constant_inputs'] = []
        self.__mx['lookup_tables'] = []

        # TODO: implement delayed feedback
        delayed_feedback_variables = []

        for v in self.__pymoca_model.inputs:
            if v.symbol.name() in delayed_feedback_variables:
                # Delayed feedback variables are local to each ensemble, and
                # therefore belong to the collection of algebraic variables,
                # rather than to the control inputs.
                self.__mx['algebraics'].append(v.symbol)
            else:
                if v.symbol.name() in kwargs.get('lookup_tables', []):
                    self.__mx['lookup_tables'].append(v.symbol)
                else:
                    # All inputs are constant inputs
                    self.__mx['constant_inputs'].append(v.symbol)

        # Log variables in debug mode
        if logger.getEffectiveLevel() == logging.DEBUG:
            logger.debug("SimulationProblem: Found states {}".format(', '.join(
                [var.name() for var in self.__mx['states']])))
            logger.debug("SimulationProblem: Found derivatives {}".format(
                ', '.join([var.name() for var in self.__mx['derivatives']])))
            logger.debug("SimulationProblem: Found algebraics {}".format(
                ', '.join([var.name() for var in self.__mx['algebraics']])))
            logger.debug("SimulationProblem: Found constant inputs {}".format(
                ', '.join([var.name()
                           for var in self.__mx['constant_inputs']])))
            logger.debug("SimulationProblem: Found parameters {}".format(
                ', '.join([var.name() for var in self.__mx['parameters']])))

        # Initialize an AliasDict for nominals and types
        self.__nominals = AliasDict(self.alias_relation)
        self.__python_types = AliasDict(self.alias_relation)
        for v in itertools.chain(self.__pymoca_model.states,
                                 self.__pymoca_model.alg_states,
                                 self.__pymoca_model.inputs):
            sym_name = v.symbol.name()

            # Store the types in an AliasDict
            self.__python_types[sym_name] = v.python_type

            # If the nominal is 0.0 or 1.0 or -1.0, ignore: get_variable_nominal returns a default of 1.0
            # TODO: handle nominal vectors (update() will need to load them)
            if ca.MX(v.nominal).is_zero() or ca.MX(
                    v.nominal - 1).is_zero() or ca.MX(v.nominal + 1).is_zero():
                continue
            else:
                if ca.MX(v.nominal).size1() != 1:
                    logger.error(
                        'Vector Nominals not supported yet. ({})'.format(
                            sym_name))
                self.__nominals[sym_name] = ca.fabs(v.nominal)
                if logger.getEffectiveLevel() == logging.DEBUG:
                    logger.debug(
                        "SimulationProblem: Setting nominal value for variable {} to {}"
                        .format(sym_name, self.__nominals[sym_name]))

        # Initialize DAE and initial residuals
        variable_lists = [
            'states', 'der_states', 'alg_states', 'inputs', 'constants',
            'parameters'
        ]
        function_arguments = [self.__pymoca_model.time] + [
            ca.veccat(*[
                v.symbol for v in getattr(self.__pymoca_model, variable_list)
            ]) for variable_list in variable_lists
        ]

        self.__dae_residual = self.__pymoca_model.dae_residual_function(
            *function_arguments)

        self.__initial_residual = self.__pymoca_model.initial_residual_function(
            *function_arguments)
        if self.__initial_residual is None:
            self.__initial_residual = ca.MX()

        # Construct state vector
        self.__sym_list = self.__mx['states'] + self.__mx['algebraics'] + self.__mx['derivatives'] + \
            self.__mx['time'] + self.__mx['constant_inputs'] + self.__mx['parameters']
        self.__state_vector = np.full(len(self.__sym_list), np.nan)

        # A very handy index
        self.__states_end_index = len(self.__mx['states']) + \
            len(self.__mx['algebraics']) + len(self.__mx['derivatives'])

        # Construct a dict to look up symbols by name (or iterate over)
        self.__sym_dict = OrderedDict(
            ((sym.name(), sym) for sym in self.__sym_list))

        # Assemble some symbolics, including those needed for a backwards Euler derivative approximation
        X = ca.vertcat(*self.__sym_list[:self.__states_end_index])
        X_prev = ca.vertcat(*[
            ca.MX.sym(sym.name() + '_prev')
            for sym in self.__sym_list[:self.__states_end_index]
        ])
        dt = ca.MX.sym("delta_t")

        # Make a list of derivative approximations using backwards Euler formulation
        derivative_approximation_residuals = []
        for index, derivative_state in enumerate(self.__mx['derivatives']):
            derivative_approximation_residuals.append(
                derivative_state - (X[index] - X_prev[index]) / dt)

        # Append residuals for derivative approximations
        dae_residual = ca.vertcat(self.__dae_residual,
                                  *derivative_approximation_residuals)

        # TODO: implement lookup_tables

        # Make a list of unscaled symbols and a list of their scaled equivalent
        unscaled_symbols = []
        scaled_symbols = []
        for sym_name, nominal in self.__nominals.items():
            index = self.__get_state_vector_index(sym_name)
            # If the symbol is a state, add it to the scaling lists
            if index <= self.__states_end_index:
                unscaled_symbols.append(X[index])
                scaled_symbols.append(X[index] * nominal)

                # Also scale previous states
                unscaled_symbols.append(X_prev[index])
                scaled_symbols.append(X_prev[index] * nominal)

        # Substitute unscaled terms for scaled terms
        dae_residual = ca.substitute(dae_residual,
                                     ca.vertcat(*unscaled_symbols),
                                     ca.vertcat(*scaled_symbols))

        if logger.getEffectiveLevel() == logging.DEBUG:
            logger.debug('SimulationProblem: DAE Residual is ' +
                         str(dae_residual))

        if X.size1() != dae_residual.size1():
            logger.error(
                'Formulation Error: Number of states ({}) does not equal number of equations ({})'
                .format(X.size1(), dae_residual.size1()))

        # Construct function parameters
        parameters = ca.vertcat(dt, X_prev,
                                *self.__sym_list[self.__states_end_index:])

        # Construct a function res_vals that returns the numerical residuals of a numerical state
        self.__res_vals = ca.Function("res_vals", [X, parameters],
                                      [dae_residual])

        # Use rootfinder() to make a function that takes a step forward in time by trying to zero res_vals()
        options = {'nlpsol': 'ipopt', 'nlpsol_options': self.solver_options()}
        self.__do_step = ca.rootfinder("next_state", "nlpsol", self.__res_vals,
                                       options)

        # Call parent class for default behaviour.
        super().__init__()
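One detail worth highlighting in the constructor above is the nominal-scaling step done with ca.substitute. A hedged, standalone sketch of the same idea (the toy residual and nominal value are my own):

import casadi as ca

x = ca.MX.sym("x")
residual = x - 10.0            # unscaled residual, solution x = 10
nominal = 100.0                # pretend nominal for x
scaled_residual = ca.substitute(residual, x, x * nominal)
print(scaled_residual)         # (100*x) - 10: the solver now sees x of order 0.1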
Example No. 18
            hqp.solve_sequential_problem()
            var_dot_sol = hqp.Mdict_seq[5]['x'].level()
            q_dot1_sol = var_dot_sol[0:14]
            s_dot1_sol = var_dot_sol[14]

            print("Solver time is = " + str(hqp.time_taken))
            comp_time.append(hqp.time_taken)
            con_viol1 = hqp.optimal_slacks[1]
            con_viol2 = hqp.optimal_slacks[2]
            con_viol3 = hqp.optimal_slacks[3]
            con_viol4 = hqp.optimal_slacks[4]
            constraint_violations = cs.horzcat(
                constraint_violations,
                cs.vertcat(con_viol1, con_viol2, con_viol3, con_viol4))

        number_non_zero = sum(cs.fabs(q_dot1_sol).full() >= 1e-3)
        q_zero_integral += number_non_zero * ts
        q_2_integral += (cs.sumsqr(q_dot1_sol)) * ts
        q_1_integral += sum(cs.fabs(q_dot1_sol).full()) * ts
        #compute q_1_integral

        #Update all the variables
        q_dot_opt = cs.vertcat(q_dot1_sol, s_dot1_sol)
        q_opt[0:15] += ts * q_dot_opt

        s1_opt = q_opt[14]
        if s1_opt >= 1:
            print("Robot1 reached its goal. Terminating")
            cool_off_counter += 1

        if visualizationBullet:
Example No. 19
    def ode_rhs(self):
        """Muscle Model ODE rhs.
        Returns
        ----------
        ode_rhs: list<cas.SX>
            description
        """

        #: Bandpass l_ce
        #b, a = signal.butter(2, 50, 'low', analog=True)
        #l_ce_filt = signal.lfilter(b, a, self._l_ce.sym)

        l_ce_tol = cas.fmax(self._l_ce.sym, 0.0)
        _stim = cas.fmax(0.01, cas.fmin(self._stim.sym, 1.))

        #: Algebraic Equation
        l_mtc = self._l_slack.val + self._l_opt.val + self._delta_length.sym
        l_se = l_mtc - l_ce_tol

        #: Muscle Activation Dynamics
        self._dA.sym = (
            _stim - self._activation.sym)/GeyerMuscle.tau_act

        #: Muscle Dynamics
        #: Series Force
        _f_se = (self._f_max.val * (
            (l_se - self._l_slack.val) / (
                self._l_slack.val * self.e_ref))**2) * (
                    l_se > self._l_slack.val)

        #: Muscle Belly Force
        _f_be_cond = self._l_opt.val * (1.0 - self.w)

        _f_be = (
            (self._f_max.val * (
                (l_ce_tol - self._l_opt.val * (1.0 - self.w)) / (
                    self._l_opt.val * self.w / 2.0))**2)) * (
            l_ce_tol <= _f_be_cond)

        #: Force-Length Relationship
        val = cas.fabs(
            (l_ce_tol - self._l_opt.val) / (self._l_opt.val * self.w))
        exposant = GeyerMuscle.c * val**3
        _f_l = cas.exp(exposant)

        #: Force Parallel Element
        _f_pe_star = (self._f_max.val * (
            (l_ce_tol - self._l_opt.val) / (self._l_opt.val * self.w))**2)*(
                l_ce_tol > self._l_opt.val)

        #: Force Velocity Inverse Relation
        _f_v_eq = ((
            self._f_max.val * self._activation.sym * _f_l) + _f_pe_star)

        f_v_cond = cas.logic_and(
            _f_v_eq < self.tol, _f_v_eq > -self.tol)

        _f_v = cas.if_else(f_v_cond, 0.0, (_f_se + _f_be) / ((
            self._f_max.val * self._activation.sym * _f_l) + _f_pe_star))

        f_v = cas.fmax(0.0, cas.fmin(_f_v, 1.5))

        self._v_ce.sym = cas.if_else(
            f_v < 1.0, self._v_max.sym * self._l_opt.val * (
                1.0 - f_v) / (1.0 + f_v * GeyerMuscle.K),
            self._v_max.sym*self._l_opt.val * (f_v - 1.0) / (
                7.56 * GeyerMuscle.K *
                (f_v - GeyerMuscle.N) + 1.0 - GeyerMuscle.N
            ))

        #: Active, Passive, Tendon Force Computation
        _f_v_ce = cas.if_else(
            self._v_ce.sym < 0.,
            (self._v_max.sym*self._l_opt.val - self._v_ce.sym) /
            (self._v_max.sym*self._l_opt.val + GeyerMuscle.K * self._v_ce.sym),
            GeyerMuscle.N + (GeyerMuscle.N - 1) * (
                self._v_max.sym*self._l_opt.val + self._v_ce.sym
            ) / (
                7.56 * GeyerMuscle.K * self._v_ce.sym - self._v_max.sym*self._l_opt.val
            ))

        self._a_force = self._activation.sym * _f_v_ce * _f_l * self._f_max.val
        self._p_force = _f_pe_star*_f_v - _f_be
        self._t_force = _f_se

        self._alg_tendon_force.sym = self._z_tendon_force.sym - self._t_force
        self._alg_active_force.sym = self._z_active_force.sym - self._a_force
        self._alg_passive_force.sym = self._z_passive_force.sym - self._p_force
        self._alg_v_ce.sym = self._z_v_ce.sym - self._v_ce.sym
        self._alg_l_mtc.sym = self._z_l_mtc.sym - l_mtc
        self._alg_dact.sym = self._z_dact.sym - self._dA.sym

        return True
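The force-velocity computation above guards a division with cas.logic_and/cas.if_else and then clamps the result. A small self-contained sketch of that guard-and-clamp pattern (the symbols x, den and the tolerance below are my own):

import casadi as cas

x = cas.SX.sym("x")
den = cas.SX.sym("den")
tol = 1e-6
# Return 0 when the denominator is within +/- tol of zero, otherwise divide,
# then clamp the ratio to [0, 1.5] as the muscle model does for f_v.
safe_ratio = cas.if_else(cas.logic_and(den < tol, den > -tol), 0.0, x / den)
clamped = cas.fmax(0.0, cas.fmin(safe_ratio, 1.5))
f = cas.Function("f", [x, den], [clamped])
print(f(3.0, 0.0))   # 0: denominator treated as singular
print(f(3.0, 1.0))   # 1.5: ratio clamped from 3.0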
Example No. 20
    def test_builtin(self):
        with open(os.path.join(TEST_DIR, 'BuiltinFunctions.mo'), 'r') as f:
            txt = f.read()
        ast_tree = parser.parse(txt)
        casadi_model = gen_casadi.generate(ast_tree, 'BuiltinFunctions')
        print("BuiltinFunctions", casadi_model)
        ref_model = Model()

        x = ca.MX.sym("x")
        y = ca.MX.sym("y")
        z = ca.MX.sym("z")
        w = ca.MX.sym("w")
        u = ca.MX.sym("u")

        ref_model.inputs = list(map(Variable, [x]))
        ref_model.outputs = list(map(Variable, [y, z, w, u]))
        ref_model.alg_states = list(map(Variable, [y, z, w, u]))
        ref_model.equations = [y - ca.sin(ref_model.time), z - ca.cos(x), w - ca.fmin(y, z), u - ca.fabs(w)]

        self.assert_model_equivalent_numeric(ref_model, casadi_model)
Example No. 21
def MinCurvature():
    track = run_map_gen()
    txs = track[:, 0]
    tys = track[:, 1]
    nvecs = track[:, 2:4]
    th_ns = [lib.get_bearing([0, 0], nvecs[i, 0:2]) for i in range(len(nvecs))]
    # sls = np.sqrt(np.sum(np.power(np.diff(track[:, :2], axis=0), 2), axis=1))

    n_max = 3
    N = len(track)

    n_f_a = ca.MX.sym('n_f', N)
    n_f = ca.MX.sym('n_f', N - 1)
    th_f_a = ca.MX.sym('n_f', N)
    th_f = ca.MX.sym('n_f', N - 1)

    x0_f = ca.MX.sym('x0_f', N - 1)
    x1_f = ca.MX.sym('x1_f', N - 1)
    y0_f = ca.MX.sym('y0_f', N - 1)
    y1_f = ca.MX.sym('y1_f', N - 1)
    th1_f = ca.MX.sym('y1_f', N - 1)
    th2_f = ca.MX.sym('y1_f', N - 1)
    th1_f1 = ca.MX.sym('y1_f', N - 2)
    th2_f1 = ca.MX.sym('y1_f', N - 2)

    o_x_s = ca.Function('o_x', [n_f], [track[:-1, 0] + nvecs[:-1, 0] * n_f])
    o_y_s = ca.Function('o_y', [n_f], [track[:-1, 1] + nvecs[:-1, 1] * n_f])
    o_x_e = ca.Function('o_x', [n_f], [track[1:, 0] + nvecs[1:, 0] * n_f])
    o_y_e = ca.Function('o_y', [n_f], [track[1:, 1] + nvecs[1:, 1] * n_f])

    dis = ca.Function('dis', [x0_f, x1_f, y0_f, y1_f],
                      [ca.sqrt((x1_f - x0_f)**2 + (y1_f - y0_f)**2)])

    track_length = ca.Function('length', [n_f_a], [
        dis(o_x_s(n_f_a[:-1]), o_x_e(n_f_a[1:]), o_y_s(n_f_a[:-1]),
            o_y_e(n_f_a[1:]))
    ])

    real = ca.Function(
        'real', [th1_f, th2_f],
        [ca.cos(th1_f) * ca.cos(th2_f) + ca.sin(th1_f) * ca.sin(th2_f)])
    im = ca.Function(
        'im', [th1_f, th2_f],
        [-ca.cos(th1_f) * ca.sin(th2_f) + ca.sin(th1_f) * ca.cos(th2_f)])

    sub_cmplx = ca.Function('a_cpx', [th1_f, th2_f],
                            [ca.atan(im(th1_f, th2_f) / real(th1_f, th2_f))])
    get_th_n = ca.Function(
        'gth', [th_f],
        [sub_cmplx(ca.pi * np.ones(N - 1), sub_cmplx(th_f, th_ns[:-1]))])

    real1 = ca.Function(
        'real', [th1_f1, th2_f1],
        [ca.cos(th1_f1) * ca.cos(th2_f1) + ca.sin(th1_f1) * ca.sin(th2_f1)])
    im1 = ca.Function(
        'im', [th1_f1, th2_f1],
        [-ca.cos(th1_f1) * ca.sin(th2_f1) + ca.sin(th1_f1) * ca.cos(th2_f1)])

    sub_cmplx1 = ca.Function(
        'a_cpx', [th1_f1, th2_f1],
        [ca.atan(im1(th1_f1, th2_f1) / real1(th1_f1, th2_f1))])

    d_n = ca.Function('d_n', [n_f_a, th_f],
                      [track_length(n_f_a) / ca.tan(get_th_n(th_f))])
    # curvature = ca.Function('curv', [th_f_a], [th_f_a[1:] - th_f_a[:-1]])
    grad = ca.Function(
        'grad', [n_f_a],
        [(o_y_e(n_f_a[1:]) - o_y_s(n_f_a[:-1])) /
         ca.fmax(o_x_e(n_f_a[1:]) - o_x_s(n_f_a[:-1]), 0.1 * np.ones(N - 1))])
    curvature = ca.Function(
        'curv', [n_f_a],
        [grad(n_f_a)[1:] - grad(n_f_a)[:-1]])  # changes in grad

    # define symbols
    n = ca.MX.sym('n', N)
    th = ca.MX.sym('th', N)


    nlp = {\
    'x': ca.vertcat(n, th),
    # 'f': ca.sumsqr(curvature(n)),
    # 'f': ca.sumsqr(sub_cmplx(th[1:], th[:-1])),
    'f': ca.sumsqr(sub_cmplx1(sub_cmplx1(th[2:], th[1:-1]), sub_cmplx1(th[1:-1], th[:-2]))),
    # 'f': ca.sumsqr(track_length(n)),
    'g': ca.vertcat(
                # dynamic constraints
                n[1:] - (n[:-1] + d_n(n, th[:-1])),

                # boundary constraints
                n[0], th[0],
                n[-1], #th[-1],
            ) \

    }

    S = ca.nlpsol('S', 'ipopt', nlp)

    ones = np.ones(N)
    n0 = ones * 0

    th0 = []
    for i in range(N - 1):
        th_00 = lib.get_bearing(track[i, 0:2], track[i + 1, 0:2])
        th0.append(th_00)

    th0.append(0)
    th0 = np.array(th0)

    x0 = ca.vertcat(n0, th0)

    lbx = [-n_max] * N + [-np.pi] * N
    ubx = [n_max] * N + [np.pi] * N

    print(curvature(n0))

    r = S(x0=x0, lbg=0, ubg=0, lbx=lbx, ubx=ubx)

    print("Solution found")
    x_opt = r['x']

    n_set = np.array(x_opt[:N])
    thetas = np.array(x_opt[1 * N:2 * N])

    # print(sub_cmplx(thetas[1:], thetas[:-1]))

    plt.figure(2)
    th_data = sub_cmplx(thetas[1:], thetas[:-1])
    plt.plot(th_data)
    plt.plot(ca.fabs(th_data), '--')
    plt.pause(0.001)

    plt.figure(3)
    plt.plot(abs(thetas), '--')
    plt.plot(thetas)
    plt.pause(0.001)

    plt.figure(4)
    plt.plot(sub_cmplx(th0[1:], th0[:-1]))
    plt.pause(0.001)

    plot_race_line(np.array(track), n_set, width=3, wait=True)
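The sub_cmplx helper above computes a wrapped angle difference via the atan of the imaginary and real parts of exp(i*(th1 - th2)). A quick numeric check, rebuilt standalone (the symbols a and b are my own):

import numpy as np
import casadi as ca

a = ca.MX.sym("a")
b = ca.MX.sym("b")
real = ca.cos(a) * ca.cos(b) + ca.sin(a) * ca.sin(b)    # cos(a - b)
im = -ca.cos(a) * ca.sin(b) + ca.sin(a) * ca.cos(b)     # sin(a - b)
angle_diff = ca.Function("angle_diff", [a, b], [ca.atan(im / real)])
print(angle_diff(np.pi / 3, np.pi / 6))   # approximately pi/6 = 0.5236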
Example No. 22
def Abs(x):
    return ca.fabs(x)
Example No. 23
def abs(x):
    if not is_casadi_type(x):
        return _onp.abs(x)

    else:
        return _cas.fabs(x)
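The example above dispatches between NumPy and CasADi depending on the argument type; is_casadi_type, _onp and _cas belong to the source library. A hedged, standalone sketch of the same dispatch using plain numpy/casadi names:

import numpy as np
import casadi as cas

def abs_dispatch(x):
    # Plain arrays go through numpy, symbolic/DM arguments through casadi.fabs.
    if isinstance(x, (cas.SX, cas.MX, cas.DM)):
        return cas.fabs(x)
    return np.abs(x)

print(abs_dispatch(np.array([-1.0, 2.0])))   # numpy path: [1. 2.]
print(abs_dispatch(cas.MX.sym("x")))         # casadi path: fabs(x) expression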
Example No. 24
    p_mean.append(pl.mean([k[j] for k in p_test]))
    p_std.append(pl.std([k[j] for k in p_test], ddof=0))

pe_test.compute_covariance_matrix()

# Generate report

print("\np_mean         = " + str(ca.DM(p_mean)))
print("phat_last_exp  = " + str(ca.DM(pe_test.estimated_parameters)))

print("\np_sd           = " + str(ca.DM(p_std)))
print("sd_from_covmat = " + str(ca.diag(ca.sqrt(pe_test.covariance_matrix))))
print("beta           = " + str(pe_test.beta))

print("\ndelta_abs_sd   = " + str(ca.fabs(ca.DM(p_std) - \
    ca.diag(ca.sqrt(pe_test.covariance_matrix)))))
print("delta_rel_sd   = " + str(ca.fabs(ca.DM(p_std) - \
    ca.diag(ca.sqrt(pe_test.covariance_matrix))) / ca.DM(p_std)))

fname = os.path.basename(__file__)[:-3] + ".rst"

report = open(fname, "w")
report.write( \
'''Concept test: covariance matrix computation
===========================================

Simulate system. Then: add gaussian noise N~(0, sigma^2), estimate,
store estimated parameter, repeat.

.. code-block:: python
Example No. 25
def rotationAngle(mx_R):
    acosarg = (casadi.trace(mx_R) - 1.) / 2.
    return casadi.if_else(casadi.fabs(acosarg) >= 1., 0., casadi.acos(acosarg))
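A quick numeric check of rotationAngle (my own; it assumes the function above is in scope together with its casadi import): the identity matrix maps to 0, a 90-degree rotation about z maps to pi/2.

import numpy as np
import casadi

Rz90 = np.array([[0., -1., 0.],
                 [1.,  0., 0.],
                 [0.,  0., 1.]])
print(rotationAngle(casadi.DM(np.eye(3))))   # 0 (acosarg hits the |.| >= 1 guard)
print(rotationAngle(casadi.DM(Rz90)))        # approximately 1.5708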
Example No. 26
    def _convert(self, symbol, t=None, y=None, u=None):
        """ See :meth:`CasadiConverter.convert()`. """
        if isinstance(
            symbol,
            (
                pybamm.Scalar,
                pybamm.Array,
                pybamm.Time,
                pybamm.InputParameter,
                pybamm.ExternalVariable,
            ),
        ):
            return casadi.MX(symbol.evaluate(t, y, u))

        elif isinstance(symbol, pybamm.StateVector):
            if y is None:
                raise ValueError("Must provide a 'y' for converting state vectors")
            return casadi.vertcat(*[y[y_slice] for y_slice in symbol.y_slices])

        elif isinstance(symbol, pybamm.BinaryOperator):
            left, right = symbol.children
            # process children
            converted_left = self.convert(left, t, y, u)
            converted_right = self.convert(right, t, y, u)
            # _binary_evaluate defined in derived classes for specific rules
            return symbol._binary_evaluate(converted_left, converted_right)

        elif isinstance(symbol, pybamm.UnaryOperator):
            converted_child = self.convert(symbol.child, t, y, u)
            if isinstance(symbol, pybamm.AbsoluteValue):
                return casadi.fabs(converted_child)
            return symbol._unary_evaluate(converted_child)

        elif isinstance(symbol, pybamm.Function):
            converted_children = [
                self.convert(child, t, y, u) for child in symbol.children
            ]
            # Special functions
            if symbol.function == np.min:
                return casadi.mmin(*converted_children)
            elif symbol.function == np.max:
                return casadi.mmax(*converted_children)
            elif symbol.function == np.abs:
                return casadi.fabs(*converted_children)
            elif isinstance(symbol.function, (PchipInterpolator, CubicSpline)):
                return casadi.interpolant("LUT", "bspline", [symbol.x], symbol.y)(
                    *converted_children
                )
            elif symbol.function.__name__.startswith("elementwise_grad_of_"):
                differentiating_child_idx = int(symbol.function.__name__[-1])
                # Create dummy symbolic variables in order to differentiate using CasADi
                dummy_vars = [
                    casadi.MX.sym("y_" + str(i)) for i in range(len(converted_children))
                ]
                func_diff = casadi.gradient(
                    symbol.differentiated_function(*dummy_vars),
                    dummy_vars[differentiating_child_idx],
                )
                # Create function and evaluate it using the children
                casadi_func_diff = casadi.Function("func_diff", dummy_vars, [func_diff])
                return casadi_func_diff(*converted_children)
            # Other functions
            else:
                return symbol._function_evaluate(converted_children)
        elif isinstance(symbol, pybamm.Concatenation):
            converted_children = [
                self.convert(child, t, y, u) for child in symbol.children
            ]
            if isinstance(symbol, (pybamm.NumpyConcatenation, pybamm.SparseStack)):
                return casadi.vertcat(*converted_children)
            # DomainConcatenation specifies a particular ordering for the concatenation,
            # which we must follow
            elif isinstance(symbol, pybamm.DomainConcatenation):
                slice_starts = []
                all_child_vectors = []
                for i in range(symbol.secondary_dimensions_npts):
                    child_vectors = []
                    for child_var, slices in zip(
                        converted_children, symbol._children_slices
                    ):
                        for child_dom, child_slice in slices.items():
                            slice_starts.append(symbol._slices[child_dom][i].start)
                            child_vectors.append(
                                child_var[child_slice[i].start : child_slice[i].stop]
                            )
                    all_child_vectors.extend(
                        [v for _, v in sorted(zip(slice_starts, child_vectors))]
                    )
                return casadi.vertcat(*all_child_vectors)

        else:
            raise TypeError(
                """
                Cannot convert symbol of type '{}' to CasADi. Symbols must all be
                'linear algebra' at this stage.
                """.format(
                    type(symbol)
                )
            )
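The elementwise_grad_of_* branch above differentiates a Python function by introducing dummy MX variables, taking casadi.gradient, and wrapping the result in a casadi.Function. A standalone sketch of that pattern (the helper numeric_partial and the test function are my own):

import casadi

def numeric_partial(func, arg_index, *values):
    # Build MX placeholders, differentiate symbolically, then evaluate numerically.
    dummies = [casadi.MX.sym("y_" + str(i)) for i in range(len(values))]
    grad_expr = casadi.gradient(func(*dummies), dummies[arg_index])
    grad_fun = casadi.Function("grad_fun", dummies, [grad_expr])
    return grad_fun(*values)

# d/dy (x * y**2) at x = 2, y = 3 -> 12
print(numeric_partial(lambda x, y: x * y**2, 1, 2.0, 3.0))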
Example No. 27
    p_mean.append(pl.mean([k[j] for k in p_test]))
    p_std.append(pl.std([k[j] for k in p_test], ddof=0))

lsqpe_test.compute_covariance_matrix()

# Generate report

print("\np_mean         = " + str(ca.DMatrix(p_mean)))
print("phat_last_exp  = " + str(ca.DMatrix(lsqpe_test.phat)))

print("\np_sd           = " + str(ca.DMatrix(p_std)))
print("sd_from_covmat = " + str(ca.diag(ca.sqrt(lsqpe_test.Covp))))
print("beta           = " + str(lsqpe_test.beta))

print("\ndelta_abs_sd   = " + str(ca.fabs(ca.DMatrix(p_std) - \
    ca.diag(ca.sqrt(lsqpe_test.Covp)))))
print("delta_rel_sd   = " + str(ca.fabs(ca.DMatrix(p_std) - \
    ca.diag(ca.sqrt(lsqpe_test.Covp))) / ca.DMatrix(p_std)))

fname = os.path.basename(__file__)[:-3] + ".rst"

report = open(fname, "w")
report.write( \
'''Concept test: covariance matrix computation
===========================================

Simulate system. Then: add gaussian noise N~(0, sigma^2), estimate,
store estimated parameter, repeat.

.. code-block:: python
Example No. 28
def Cl_flat_plate(alpha, Re_c):
    Re_c = cas.fabs(Re_c)
    alpha_rad = alpha * np.pi / 180
    return 2 * np.pi * alpha_rad
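Usage sketch (my own; assumes Cl_flat_plate and its module-level imports from above are in scope): thin-airfoil theory gives Cl = 2*pi*alpha with alpha in radians, independent of Re_c here.

print(float(Cl_flat_plate(alpha=5.0, Re_c=1e6)))   # 2*pi*(5*pi/180), roughly 0.548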
Example No. 29
    def runSolver(self,U,trajTrue=None):
        # make sure all bounds are set
        (xMissing,pMissing) = self._guessMap.getMissing()
        msg = []
        for name in xMissing:
            msg.append("you forgot to set a guess for \""+name+"\" at timesteps: "+str(xMissing[name]))
        for name in pMissing:
            msg.append("you forgot to set a guess for \""+name+"\"")
        if len(msg)>0:
            raise ValueError('\n'.join(msg))


        lbx,ubx = zip(*(self._boundMap.vectorize()))
        xk = C.DMatrix(list(self._guessMap.vectorize()))

        for k in range(100):
            ############# plot stuff ###############
            print "iteration: ",k
#            import nmheMaps
#            xOpt = np.array(xk).squeeze()
#            traj = nmheMaps.VectorizedReadOnlyNmheMap(self.dae,self.nk,xOpt)
#            
#            xsT =  np.array([trajTrue.lookup('x',timestep=kk) for kk in range(self.nk+1)] )
#            ysT =  np.array([trajTrue.lookup('y',timestep=kk) for kk in range(self.nk+1)] )
#            zsT =  np.array([trajTrue.lookup('z',timestep=kk) for kk in range(self.nk+1)] )
#
#            xs =  np.array([traj.lookup('x',timestep=kk) for kk in range(self.nk+1)] )
#            ys =  np.array([traj.lookup('y',timestep=kk) for kk in range(self.nk+1)] )
#            zs =  np.array([traj.lookup('z',timestep=kk) for kk in range(self.nk+1)] )
#            
#            outputMap = nmheMaps.NmheOutputMap(self._outputMapGenerator, xOpt, U)
#            c = np.array([outputMap.lookup('c',timestep=kk) for kk in range(self.nk)])
#            cdot = np.array([outputMap.lookup('cdot',timestep=kk) for kk in range(self.nk)])
#
#            figure()
#            title(str(float(k)))
#            subplot(3,2,1)
#            plot(xs)
#            plot(xsT)
#            ylabel('x '+str(k))
#            
#            subplot(3,2,3)
#            plot(ys)
#            plot(ysT)
#            ylabel('y '+str(k))
#            
#            subplot(3,2,5)
#            plot(zs)
#            plot(zsT)
#            ylabel('z '+str(k))
#            
##            subplot(2,2,2)
##            plot(dxs,-dzs)
##            ylabel('vel')
##            axis('equal')
#
#            subplot(3,2,2)
#            plot(c)
#            ylabel('c')
#
#            subplot(3,2,4)
#            plot(cdot)
#            ylabel('cdot')
#            ##########################################


            self.masterFun.setInput(xk,0)
            self.masterFun.setInput(U,1)
            t0 = time.time()
            try:
                self.masterFun.evaluate()
            except RuntimeError as e:
                print "ERRRRRRRRRRRRROR"
                show()
                raise e

            t1 = time.time()
            masterFunTime = (t1-t0)*1000
            hessL  = self.masterFun.output(0)
            gradF  = self.masterFun.output(1)
            g      = self.masterFun.output(2)
            jacobG = self.masterFun.output(3)
            f      = self.masterFun.output(4)

            self.qp.setInput(0,      C.QP_X_INIT)
            self.qp.setInput(hessL,  C.QP_H)
            self.qp.setInput(jacobG, C.QP_A)
            self.qp.setInput(gradF,  C.QP_G)

            assert all((lbx-xk) <= 0), "lower bounds violation"
            assert all((ubx-xk) >= 0), "upper bounds violation"
            self.qp.setInput(lbx-xk,C.QP_LBX)
            self.qp.setInput(ubx-xk,C.QP_UBX)
            
            self.qp.setInput(self.glb-g, C.QP_LBA)
            self.qp.setInput(self.gub-g, C.QP_UBA)

            t0 = time.time()
            self.qp.evaluate()
            t1 = time.time()

#            print "gradF: ",gradF
#            print 'dim(jacobG): "gra
#            print "rank: ",np.linalg.matrix_rank(jacobG)
            print "masterFun delta time: %.3f ms" % masterFunTime
            print "f: ",f,'\tmax constraint: ',max(C.fabs(g))
            print "qp delta time: %.3f ms" % ((t1-t0)*1000)
            print ""
            deltaX = self.qp.output(C.QP_PRIMAL)

#            import scipy.io
#            scipy.io.savemat('hessL.mat',{'hessL':np.array(hessL),
#                                          'gradF':np.array(gradF),
#                                          'x0':0*np.array(deltaX),
#                                          'xopt':np.array(deltaX),
#                                          'lbx':np.array(lbx-xk),
#                                          'ubx':np.array(ubx-xk),
#                                          'jacobG':np.array(jacobG),
#                                          'lba':np.array(self.glb-g),
#                                          'uba':np.array(self.gub-g)})
#            import sys; sys.exit()

#            print deltaX
            xk += deltaX
Example No. 30
def Cf_flat_plate(Re_L):
    Re_L = cas.fabs(Re_L)
    # return 0.074 / Re_L ** 0.2  # Turbulent flat plate
    # return 0.02666 * Re_L ** -0.139  # Schlichting's model, roughly accounts for laminar part ("Boundary Layer Theory" 7th Ed., pg. 644)
    # return smoothmax(0.074 / Re_L ** 0.2 - 1742 / Re_L, 1.33 / Re_L ** 0.5, 1000)
    return cas.fmax(0.074 / Re_L**0.2 - 1742 / Re_L, 1.33 / Re_L**0.5)
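Numeric spot-check (my own; assumes Cf_flat_plate and its imports from above are in scope): at high Reynolds number the blended turbulent term dominates, while at low Reynolds number the fmax falls back to the laminar 1.33/sqrt(Re) branch.

print(float(Cf_flat_plate(1e6)))   # roughly 0.0029 (turbulent branch)
print(float(Cf_flat_plate(1e4)))   # roughly 0.0133 (laminar branch wins the fmax)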
Example No. 31
def Cf_flat_plate_convex(Re_L):
    Re_L = cas.fabs(Re_L)
    # return 0.074 / Re_L ** 0.2  # Turbulent flat plate
    return 0.02666 * Re_L**-0.139  # Schlichting's model, roughly accounts for laminar part ("Boundary Layer Theory" 7th Ed., pg. 644)
Example No. 32
    def _initialize_mav_objective(self):
        """ Initialize the objective function to minimize the absolute value of
        the flux vector """

        self.objective_sx += (self.col_vars['alpha'] *
                              cs.fabs(self.var.v_sx).values.sum())
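The objective term above is an L1 (absolute-value) penalty on the flux vector, weighted by alpha. A hedged standalone sketch of the same construction on a toy variable (the names v, target and the values are my own); note that cs.fabs makes the objective non-smooth, which some solvers handle better than others:

import casadi as cs

v = cs.SX.sym("v", 3)
alpha = 0.1
target = cs.DM([1.0, -2.0, 0.5])
objective = cs.sumsqr(v - target) + alpha * cs.sum1(cs.fabs(v))
print(objective)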
Example No. 33
    p_mean.append(pl.mean([k[j] for k in p_test]))
    p_std.append(pl.std([k[j] for k in p_test], ddof = 0))

lsqpe_test.compute_covariance_matrix()


# Generate report

print("\np_mean         = " + str(ca.DMatrix(p_mean)))
print("phat_last_exp  = " + str(ca.DMatrix(lsqpe_test.phat)))

print("\np_sd           = " + str(ca.DMatrix(p_std)))
print("sd_from_covmat = " + str(ca.diag(ca.sqrt(lsqpe_test.Covp))))
print("beta           = " + str(lsqpe_test.beta))

print("\ndelta_abs_sd   = " + str(ca.fabs(ca.DMatrix(p_std) - \
    ca.diag(ca.sqrt(lsqpe_test.Covp)))))
print("delta_rel_sd   = " + str(ca.fabs(ca.DMatrix(p_std) - \
    ca.diag(ca.sqrt(lsqpe_test.Covp))) / ca.DMatrix(p_std)))


fname = os.path.basename(__file__)[:-3] + ".rst"

report = open(fname, "w")
report.write( \
'''Concept test: covariance matrix computation
===========================================

Simulate system. Then: add gaussian noise N~(0, sigma^2), estimate,
store estimated parameter, repeat.

.. code-block:: python
Example No. 34
    def _integrate(self, model, t_eval, inputs_dict=None):
        """
        Calculate the solution of the algebraic equations through root-finding

        Parameters
        ----------
        model : :class:`pybamm.BaseModel`
            The model whose solution to calculate.
        t_eval : :class:`numpy.array`, size (k,)
            The times at which to compute the solution
        inputs_dict : dict, optional
            Any input parameters to pass to the model when solving. If any input
            parameters that are present in the model are missing from "inputs", then
            the solution will consist of `ProcessedSymbolicVariable` objects, which must
            be provided with inputs to obtain their value.
        """
        # Record whether there are any symbolic inputs
        inputs_dict = inputs_dict or {}
        has_symbolic_inputs = any(
            isinstance(v, casadi.MX) for v in inputs_dict.values())
        symbolic_inputs = casadi.vertcat(
            *[v for v in inputs_dict.values() if isinstance(v, casadi.MX)])

        # Create casadi objects for the root-finder
        inputs = casadi.vertcat(*[v for v in inputs_dict.values()])

        y0 = model.y0

        # If y0 already satisfies the tolerance for all t then keep it
        if has_symbolic_inputs is False and all(
                np.all(
                    abs(model.casadi_algebraic(t, y0, inputs).full()) <
                    self.tol) for t in t_eval):
            pybamm.logger.debug("Keeping same solution at all times")
            return pybamm.Solution(t_eval,
                                   y0,
                                   model,
                                   inputs_dict,
                                   termination="success")

        # The casadi algebraic solver can read rhs equations, but leaves them unchanged
        # i.e. the part of the solution vector that corresponds to the differential
        # equations will be equal to the initial condition provided. This allows this
        # solver to be used for initialising the DAE solvers
        if model.rhs == {}:
            len_rhs = 0
            y0_diff = casadi.DM()
            y0_alg = y0
        else:
            len_rhs = model.concatenated_rhs.size
            y0_diff = y0[:len_rhs]
            y0_alg = y0[len_rhs:]

        y_alg = None

        # Set up
        t_sym = casadi.MX.sym("t")
        y_alg_sym = casadi.MX.sym("y_alg", y0_alg.shape[0])
        y_sym = casadi.vertcat(y0_diff, y_alg_sym)

        t_and_inputs_sym = casadi.vertcat(t_sym, symbolic_inputs)
        alg = model.casadi_algebraic(t_sym, y_sym, inputs)

        # Check interpolant extrapolation
        if model.interpolant_extrapolation_events_eval:
            extrap_event = [
                event(0, y0, inputs)
                for event in model.interpolant_extrapolation_events_eval
            ]
            if extrap_event:
                if (np.concatenate(extrap_event) < self.extrap_tol).any():
                    extrap_event_names = []
                    for event in model.events:
                        if (event.event_type
                                == pybamm.EventType.INTERPOLANT_EXTRAPOLATION
                                and (event.expression.evaluate(
                                    0, y0.full(), inputs=inputs) <
                                     self.extrap_tol)):
                            extrap_event_names.append(event.name[12:])

                    raise pybamm.SolverError(
                        "CasADi solver failed because the following interpolation "
                        "bounds were exceeded at the initial conditions: {}. "
                        "You may need to provide additional interpolation points "
                        "outside these bounds.".format(extrap_event_names))

        # Set constraints vector in the casadi format
        # Constrain the unknowns. 0 (default): no constraint on ui, 1: ui >= 0.0,
        # -1: ui <= 0.0, 2: ui > 0.0, -2: ui < 0.0.
        constraints = np.zeros_like(model.bounds[0], dtype=int)
        # If the lower bound is positive then the variable must always be positive
        constraints[model.bounds[0] >= 0] = 1
        # If the upper bound is negative then the variable must always be negative
        constraints[model.bounds[1] <= 0] = -1

        # Set up rootfinder
        roots = casadi.rootfinder(
            "roots",
            "newton",
            dict(x=y_alg_sym, p=t_and_inputs_sym, g=alg),
            {
                **self.extra_options,
                "abstol": self.tol,
                "constraints": list(constraints[len_rhs:]),
            },
        )
        timer = pybamm.Timer()
        integration_time = 0
        for idx, t in enumerate(t_eval):
            # Evaluate algebraic with new t and previous y0, if it's already close
            # enough then keep it
            # We can't do this if there are symbolic inputs
            if has_symbolic_inputs is False and np.all(
                    abs(model.casadi_algebraic(t, y0, inputs).full()) <
                    self.tol):
                pybamm.logger.debug("Keeping same solution at t={}".format(
                    t * model.timescale_eval))
                if y_alg is None:
                    y_alg = y0_alg
                else:
                    y_alg = casadi.horzcat(y_alg, y0_alg)
            # Otherwise calculate new y_sol
            else:
                t_eval_inputs_sym = casadi.vertcat(t, symbolic_inputs)
                # Solve
                try:
                    timer.reset()
                    y_alg_sol = roots(y0_alg, t_eval_inputs_sym)
                    integration_time += timer.time()
                    success = True
                    message = None
                    # Check final output
                    y_sol = casadi.vertcat(y0_diff, y_alg_sol)
                    fun = model.casadi_algebraic(t, y_sol, inputs)
                except RuntimeError as err:
                    success = False
                    message = err.args[0]
                    fun = None

                # If there are no symbolic inputs, check the function is below the tol
                # Skip this check if there are symbolic inputs
                if success and (has_symbolic_inputs is True or
                                (not any(np.isnan(fun))
                                 and np.all(casadi.fabs(fun) < self.tol))):
                    # update initial guess for the next iteration
                    y0_alg = y_alg_sol
                    y0 = casadi.vertcat(y0_diff, y0_alg)
                    # update solution array
                    if y_alg is None:
                        y_alg = y_alg_sol
                    else:
                        y_alg = casadi.horzcat(y_alg, y_alg_sol)
                elif not success:
                    raise pybamm.SolverError(
                        "Could not find acceptable solution: {}".format(
                            message))
                elif any(np.isnan(fun)):
                    raise pybamm.SolverError(
                        "Could not find acceptable solution: solver returned NaNs"
                    )
                else:
                    raise pybamm.SolverError("""
                        Could not find acceptable solution: solver terminated
                        successfully, but maximum solution error ({})
                        above tolerance ({})
                        """.format(casadi.mmax(casadi.fabs(fun)), self.tol))

        # Concatenate differential part
        y_diff = casadi.horzcat(*[y0_diff] * len(t_eval))
        y_sol = casadi.vertcat(y_diff, y_alg)
        # Return solution object (no events, so pass None to t_event, y_event)
        sol = pybamm.Solution([t_eval],
                              y_sol,
                              model,
                              inputs_dict,
                              termination="success")
        sol.integration_time = integration_time
        return sol
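The acceptance test above (around the casadi.fabs / casadi.mmax calls) only keeps a candidate root if no residual entry is NaN and every entry is below the solver tolerance. A standalone numeric sketch of that check (the toy residual vector and tolerance are my own):

import numpy as np
import casadi

residual = casadi.DM([1e-10, -3e-9, 2e-11])
tol = 1e-6
ok = (not np.any(np.isnan(residual.full()))
      and np.all(casadi.fabs(residual).full() < tol))
print(ok)                                          # True
print(float(casadi.mmax(casadi.fabs(residual))))   # 3e-09, the worst-case error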
Example No. 35
p_std = pl.std(p_test, ddof=0)

pe_test.compute_covariance_matrix()
pe_test.print_estimation_results()


# Generate report

print("\np_mean         = " + str(ca.DMatrix(p_mean)))
print("phat_last_exp  = " + str(ca.DMatrix(pe_test.estimated_parameters)))

print("\np_sd           = " + str(ca.DMatrix(p_std)))
print("sd_from_covmat = " + str(ca.diag(ca.sqrt(pe_test.covariance_matrix))))
print("beta           = " + str(pe_test.beta))

print("\ndelta_abs_sd   = " + str(ca.fabs(ca.DMatrix(p_std) - \
    ca.diag(ca.sqrt(pe_test.covariance_matrix)))))
print("delta_rel_sd   = " + str(ca.fabs(ca.DMatrix(p_std) - \
    ca.diag(ca.sqrt(pe_test.covariance_matrix))) / ca.DMatrix(p_std)))


fname = os.path.basename(__file__)[:-3] + ".rst"

report = open(fname, "w")
report.write( \
'''Concept test: covariance matrix computation
===========================================

Simulate system. Then: add gaussian noise N~(0, sigma^2), estimate,
store estimated parameter, repeat.

.. code-block:: python
Example No. 36
def hqpvschqp(params):
    result = {}
    n = params['n']
    total_eq_constraints = params['total_eq_constraints']
    total_ineq_constraints = params['total_ineq_constraints']
    pl = params['pl']  #priority levels
    gamma = params['gamma']
    rand_seed = params['rand_seed']
    adaptive_method = params['adaptive_method']  #True

    n_eq_per_level = int(total_eq_constraints / pl)
    eq_last_level = n_eq_per_level + (total_eq_constraints -
                                      n_eq_per_level * pl)
    n_ineq_per_level = int(total_ineq_constraints / pl)
    ineq_last_level = n_ineq_per_level + (total_ineq_constraints -
                                          n_ineq_per_level * pl)
    # print(n_eq_per_level)
    # print(n_ineq_per_level)
    # print(eq_first_level)
    # print(ineq_first_level)
    count_hierarchy_failure = 0
    count_same_constraints = 0
    count_identical_solution = 0
    count_geq_constraints = 0

    hqp = hqp_p.hqp()
    print("Using random seed " + str(rand_seed))
    # n_eq_per_level = 6
    # n_ineq_per_level = 6
    np.random.seed(
        rand_seed)  #for 45, 1, 8 HQP-l1 does not agree with the cHQP
    #15, 18, 11 for 8 priority levels

    x, x_dot = hqp.create_variable(n, 1e-12)
    A_eq_all = cs.DM(np.random.randn(params['eq_con_rank'], n))
    A_extra = (A_eq_all.T @ cs.DM(
        np.random.randn(params['eq_con_rank'],
                        total_eq_constraints - params['eq_con_rank']))).T
    A_eq_all = cs.vertcat(A_eq_all, A_extra)
    A_eq_all = A_eq_all.full()
    np.random.shuffle(A_eq_all)
    A_eq_all += np.random.randn(total_eq_constraints, n) * 1e-5
    b_eq_all = np.random.randn(total_eq_constraints)
    # normalize each row vector
    row_vec_norm = []
    for i in range(A_eq_all.shape[0]):
        row_vec_norm.append(cs.norm_1(A_eq_all[i, :]))
        # print(row_vec_norm)
        b_eq_all[i] /= row_vec_norm[i]
        for j in range(A_eq_all.shape[1]):
            A_eq_all[i, j] = A_eq_all[i, j].T / row_vec_norm[i]

    A_ineq_all = cs.DM(np.random.randn(params['ineq_con_rank'], n))
    A_ineq_extra = (A_ineq_all.T @ cs.DM(
        np.random.randn(params['ineq_con_rank'],
                        total_ineq_constraints - params['ineq_con_rank']))).T
    A_ineq_all = cs.vertcat(A_ineq_all, A_ineq_extra)
    A_ineq_all = A_ineq_all.full()
    np.random.shuffle(A_ineq_all)
    b_ineq_all = np.random.randn(total_ineq_constraints)
    b_ineq_all_lower = b_ineq_all - 1
    # print(b_ineq_all)
    # print(b_ineq_all_lower)
    # normalize each row vector
    row_ineqvec_norm = []
    for i in range(A_ineq_all.shape[0]):
        row_ineqvec_norm.append(cs.norm_1(A_ineq_all[i, :]))
        b_ineq_all[i] /= row_ineqvec_norm[i]
        b_ineq_all_lower[i] /= row_ineqvec_norm[i]
        for j in range(A_ineq_all.shape[1]):
            A_ineq_all[i, j] = A_ineq_all[i, j] / row_ineqvec_norm[i]

    A_eq = {}
    b_eq = {}
    A_eq_opti = {}
    b_eq_opti = {}
    A_ineq_opti = {}
    b_upper_opti = {}

    A_ineq = {}
    b_upper = {}
    b_lower = {}

    params = x
    params_init = [0] * n
    #create these tasks
    counter_eq = 0
    counter_ineq = 0
    # print(row_ineqvec_norm)
    for i in range(pl):

        if i != pl - 1:
            A_eq[i] = A_eq_all[counter_eq:counter_eq + n_eq_per_level, :]
            b_eq[i] = b_eq_all[counter_eq:counter_eq + n_eq_per_level]
            counter_eq += n_eq_per_level
            hqp.create_constraint(cs.mtimes(A_eq[i], x_dot) - b_eq[i],
                                  'equality',
                                  priority=i)

            A_ineq[i] = A_ineq_all[counter_ineq:counter_ineq +
                                   n_ineq_per_level, :]
            b_upper[i] = b_ineq_all[counter_ineq:counter_ineq +
                                    n_ineq_per_level]
            b_lower[i] = b_ineq_all_lower[counter_ineq:counter_ineq +
                                          n_ineq_per_level]
            counter_ineq += n_ineq_per_level
            hqp.create_constraint(A_ineq[i] @ x_dot,
                                  'lub',
                                  priority=i,
                                  options={
                                      'lb': b_lower[i],
                                      'ub': b_upper[i]
                                  })

        else:
            A_eq[i] = A_eq_all[counter_eq:counter_eq + eq_last_level, :]
            b_eq[i] = b_eq_all[counter_eq:counter_eq + eq_last_level]
            counter_eq += eq_last_level
            hqp.create_constraint(cs.mtimes(A_eq[i], x_dot) - b_eq[i],
                                  'equality',
                                  priority=i)

            A_ineq[i] = A_ineq_all[counter_ineq:counter_ineq +
                                   ineq_last_level, :]
            b_upper[i] = b_ineq_all[counter_ineq:counter_ineq +
                                    ineq_last_level]
            b_lower[i] = b_ineq_all_lower[counter_ineq:counter_ineq +
                                          ineq_last_level]
            counter_ineq += ineq_last_level
            hqp.create_constraint(A_ineq[i] @ x_dot,
                                  'lub',
                                  priority=i,
                                  options={
                                      'lb': b_lower[i],
                                      'ub': b_upper[i]
                                  })

    # print(A_eq)
    # p_opts = {"expand":True}
    # s_opts = {"max_iter": 100, 'tol':1e-8}#, 'hessian_approximation':'limited-memory', 'limited_memory_max_history' : 5, 'tol':1e-6}
    # hqp.opti.solver('ipopt', p_opts, s_opts)
    qpsol_options = {'error_on_fail': False}
    hqp.opti.solver(
        "sqpmethod", {
            "expand": True,
            "qpsol": 'qpoases',
            'qpsol_options': qpsol_options,
            'print_iteration': False,
            'print_header': True,
            'print_status': True,
            "print_time": True,
            'max_iter': 1000
        })

    blockPrint()
    hqp.variables0 = params
    hqp.configure()

    # hqp.setup_cascadedQP()

    sol_chqp = hqp.solve_cascadedQP5(params_init, [0] * n)
    enablePrint()
    chqp_status = sol_chqp != False
    result['chqp_status'] = chqp_status
    if not chqp_status:
        print("cHQP failed")
    else:
        result['chqp_time'] = hqp.time_taken
        print("Time taken by chqp = " + str(hqp.time_taken))
        # if not adaptive_method:
        # 	return False, False, False, False, False, False
        # else:
        # 	return False, False, False, False, False, False, False

    blockPrint()
    if not adaptive_method:
        hqp.time_taken = 0
        sol = hqp.solve_HQPl1(params_init, [0] * n, gamma_init=gamma)
        enablePrint()
        print("Time taken by the non-adaptive method = " + str(hqp.time_taken))
    else:
        # tic = time.time()
        sol, hierarchical_failure = hqp.solve_adaptive_hqp3(params_init,
                                                            [0] * n,
                                                            gamma_init=gamma)
        # toc = time.time() - tic
        tp = 0  #true positive
        fp = 0
        tn = 0
        fn = 0
    hqp_status = sol != False
    enablePrint()
    # print("Total time taken adaptive HQP = " + str(toc))
    result['hqp_status'] = hqp_status
    if not hqp_status:
        print("hqp-l1 failed")
    else:
        result['hqp_time'] = hqp.time_taken
        if adaptive_method:
            result['heuristic_prediction'] = len(hierarchical_failure) == 0
        # if not adaptive_method:
        # 	return False, False, False, True, False, False
        # else:
        # 	return False, False, False, True, False, False, False

    #further analysis and comparison only if both hqp and chqp returned without failing

    if hqp_status and chqp_status:

        x_dot_sol = sol.value(x_dot)
        # print(x_dot_sol)
        con_viol2 = []
        # x_dot_sol2 = sol_chqp[pl - 1].value(x_dot)
        x_dot_sol2 = sol_chqp[pl - 1].value(hqp.cHQP_xdot[pl - 1])
        # print(x_dot_sol2)
        con_viol = []

        geq_constraints_satisfied = True
        same_constraints_satisfied = True
        lex_con_norm = True

        verbose = False
        running_counter_satisfied_con_hqp = 0
        running_counter_satisfied_con_chqp = 0
        for i in range(1, pl):

            sol_hqp = sol.value(hqp.slacks[i])

            obj_sol_hqp = sum(list(sol_hqp)) + 0.1 * cs.sumsqr(sol_hqp)

            con_viol.append(cs.norm_1(sol_hqp))
            # sol_cHQP = sol_chqp[pl - 1].value(hqp.slacks[i])
            sol_cHQP = sol_chqp[i].value(hqp.cHQP_slacks[i])
            obj_sol_chqp = sum(list(sol_cHQP)) + 0.1 * cs.sumsqr(sol_cHQP)

            con_viol2.append(cs.norm_1(sol_cHQP))

            #Computing which constraints are satisfied
            satisfied_con_hqp = sol_hqp <= 1e-8
            satisfied_con_chqp = sol_cHQP <= 1e-8

            # compute whether the same constraints are satisfied
            if (satisfied_con_hqp != satisfied_con_chqp).any():
                same_constraints_satisfied = False

            #compute if a geq number of constraints are satisfied by the hqp
            running_counter_satisfied_con_chqp += sum(satisfied_con_chqp)
            running_counter_satisfied_con_hqp += sum(satisfied_con_hqp)
            if running_counter_satisfied_con_hqp < running_counter_satisfied_con_chqp:
                geq_constraints_satisfied = False

            # if cs.norm_1(sol_hqp) > cs.norm_1(sol_cHQP) + 1e-4:
            if obj_sol_hqp > obj_sol_chqp + 1e-4:
                lex_con_norm = False
                if verbose:
                    print("Lex norm unsatisfied!!!!!!!!!!!!!!!!!")

            if verbose:  #make true if print
                print("Level hqp-l1" + str(i))
                print(sol_hqp)
                print("Level chqp" + str(i))
                print(sol_cHQP)
                print(satisfied_con_hqp)
                print(satisfied_con_chqp)

        if verbose:
            print(x_dot_sol)
            print(x_dot_sol2)
            print(hqp.slack_weights)

        if verbose:
            print(con_viol)
            print(con_viol2)
            # print("diff between solutions = " + str(max(cs.fabs(x_dot_sol - x_dot_sol2).full())))

        identical_solution = max(
            cs.fabs(x_dot_sol - x_dot_sol2).full()) <= 1e-4
        if identical_solution:
            # print("Identical solution by both methods!!")
            count_identical_solution += 1
            count_geq_constraints += 1
            count_same_constraints += 1
            if adaptive_method:
                if len(hierarchical_failure) > 0:
                    fp += 1
                else:
                    tn += 1

        elif same_constraints_satisfied:
            # print("same constraints are satisfied")
            count_same_constraints += 1
            count_geq_constraints += 1
            if adaptive_method:
                if len(hierarchical_failure) > 0:
                    fp += 1
                else:
                    tn += 1

        elif geq_constraints_satisfied or lex_con_norm:
            # print("The same or greater number of constraints satisfied at each level")
            count_geq_constraints += 1
            if adaptive_method:
                if len(hierarchical_failure) > 0:
                    fp += 1
                else:
                    tn += 1

        else:
            # print("hierarchy failed!!!!!!!!!!!!!!!!")
            count_hierarchy_failure += 1
            if adaptive_method:
                if len(hierarchical_failure) > 0:
                    tp += 1
                else:
                    fn += 1
        if verbose:
            print("hierarchy failed " + str(count_hierarchy_failure))
            print("Identical solution " + str(count_identical_solution))
            print("Same constraints satisfied " + str(count_same_constraints))
            print("Geq constraints satisfied " + str(count_geq_constraints))
        result['identical_solution'] = identical_solution[0]
        result['same_constraints_satisfied'] = same_constraints_satisfied
        result['geq_constraints_satisfied'] = geq_constraints_satisfied
        result['lex_con_norm'] = lex_con_norm

        if adaptive_method:
            result['heuristic_predictor'] = {
                'tn': tn,
                'tp': tp,
                'fp': fp,
                'fn': fn
            }
    return result
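For reference, a standalone sketch (my own toy sizes) of the row-normalization step used above for both the equality and inequality constraint matrices: each row of A and the matching entry of b are divided by the row's 1-norm.

import numpy as np
import casadi as cs

A = np.random.randn(4, 3)
b = np.random.randn(4)
for i in range(A.shape[0]):
    row_norm = float(cs.norm_1(cs.DM(A[i, :])))
    A[i, :] /= row_norm
    b[i] /= row_norm
print(np.abs(A).sum(axis=1))   # every row now has unit 1-norm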