Example #1
		def findLyapunovFunctionSOS(self, x0, deg_V, deg_L):
			prog = MathematicalProgram()
			# Declare the "indeterminates". These are the variables that define the polynomials.
			z = prog.NewIndeterminates(nZ, 'z')  # nZ (state dimension) is assumed to be defined in the enclosing scope
			x = prog.NewIndeterminates(1, 'x')[0]
			y = prog.NewIndeterminates(1, 'y')[0]
			thetadot = prog.NewIndeterminates(1, 'thetadot')[0]
			X = np.array([x, y, thetadot])
			
			sym_system = self.ToSymbolic()
			sym_context = sym_system.CreateDefaultContext()
			sym_context.SetContinuousState(z0 + z)   # z0: nominal state, assumed defined in the enclosing scope
			sym_context.FixInputPort(0, u0 + ucon)   # u0, ucon: nominal input and input indeterminates, assumed defined elsewhere
			f = sym_system.EvalTimeDerivatives(sym_context).CopyToVector() # - dztrajdt.value(t).transpose()
				
			# Construct a polynomial V that contains all monomials in x, y, thetadot up to degree deg_V.
			V = prog.NewFreePolynomial(Variables(X), deg_V).ToExpression()
			eps = 1e-4
			constraint1 = prog.AddSosConstraint(V - eps*(X-x0).dot(X-x0)) # constraint to enforce that V is strictly positive away from x0.
			Vdot = V.Jacobian(X).dot(f) # Construct the polynomial which is the time derivative of V
			L = prog.NewFreePolynomial(Variables(X), deg_L).ToExpression() # Construct a polynomial L representing the "Lagrange multiplier".
			constraint2 = prog.AddSosConstraint(-Vdot - L*(x**2+y**2-1) -
                                    eps*(X-x0).dot(X-x0)*y**2) # Add a constraint that Vdot is strictly negative away from x0
			# Add V(0) = 0 constraint
			constraint3 = prog.AddLinearConstraint(V.Substitute({y: 0, x: 1, thetadot: 0}) == 0)
			# Add V(theta=xxx) = 1, just to set the scale.
			constraint4 = prog.AddLinearConstraint(V.Substitute({y: 1, x: 0, thetadot: 0}) == 1)
			# Call the solver.
			result = Solve(prog)
			Vsol = Polynomial(result.GetSolution(V))
			return Vsol
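
The same pattern is easier to see on a self-contained toy problem. The sketch below is not taken from the example above: it certifies V = x**2 on the sublevel set V <= rho for the scalar system xdot = -x + x**3 (the system and rho are my own choices), using the same AddSosConstraint / S-procedure recipe.

from pydrake.all import MathematicalProgram, Solve, Variables

prog = MathematicalProgram()
x = prog.NewIndeterminates(1, "x")[0]
f = -x + x**3                  # toy dynamics; the true region of attraction is |x| < 1
V = x**2                       # candidate Lyapunov function
Vdot = V.Differentiate(x) * f  # time derivative of V along the dynamics
rho = 0.9                      # sublevel set V <= rho that we try to certify

# S-procedure: -Vdot - lambda(x)*(rho - V) must be SOS with lambda(x) itself SOS,
# which certifies Vdot <= 0 everywhere on {V <= rho}.
lam, _ = prog.NewSosPolynomial(Variables([x]), 2)
prog.AddSosConstraint(-Vdot - lam.ToExpression() * (rho - V))

result = Solve(prog)
print("V <= %.1f certified as invariant:" % rho, result.is_success())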
Example #2
    def SOS_compute_1(self, S, rho_prev):
        # fix V and rho, search for L and u
        prog = MathematicalProgram()
        x = prog.NewIndeterminates(2, "x")

        # Define u
        K = prog.NewContinuousVariables(2, "K")

        # Fixed Lyapunov
        V = x.dot(np.dot(S, x))
        Vdot = Jacobian([V], x).dot(self.dynamics_K(x, K))[0]

        # Define the Lagrange multiplier as an SOS polynomial; the second return
        # value is its Gram matrix of decision variables.
        (lambda_, lambda_gram) = prog.NewSosPolynomial(Variables(x), 2)
        prog.AddLinearConstraint(K[0] * x[0] <= 2.5)
        prog.AddSosConstraint(-Vdot - lambda_.ToExpression() * (rho_prev - V))

        result = prog.Solve()
        # print(lambda_.ToExpression())
        # print(lambda_.decision_variables())
        lc = [prog.GetSolution(var) for var in lambda_.decision_variables()]
        lbda_coeff = np.ones([3, 3])
        lbda_coeff[0, 0] = lc[0]
        lbda_coeff[0, 1] = lbda_coeff[1, 0] = lc[1]
        lbda_coeff[2, 0] = lbda_coeff[0, 2] = lc[2]
        lbda_coeff[1, 1] = lc[3]
        lbda_coeff[2, 1] = lbda_coeff[1, 2] = lc[4]
        lbda_coeff[2, 2] = lc[5]
        return lbda_coeff
Example #3
    def SOS_compute_3(self, K, l_coeff, rho_max=10.):
        prog = MathematicalProgram()
        # fix u and lbda, search for V and rho
        x = prog.NewIndeterminates(2, "x")

        # get lbda from before
        l = np.array([x[1], x[0], 1])
        lbda = l.dot(np.dot(l_coeff, l))

        # rho is decision variable now
        rho = prog.NewContinuousVariables(1, "rho")[0]

        # create lyap V
        s = prog.NewContinuousVariables(4, "s")
        S = np.array([[s[0], s[1]], [s[2], s[3]]])
        V = x.dot(np.dot(S, x))
        Vdot = Jacobian([V], x).dot(self.dynamics_K(x, K))[0]

        prog.AddSosConstraint(V)
        prog.AddSosConstraint(-Vdot - lbda * (rho - V))

        prog.AddLinearCost(-rho)
        prog.AddLinearConstraint(rho <= rho_max)

        prog.Solve()
        rho = prog.GetSolution(rho)
        s = prog.GetSolution(s)
        return s, rho
Example #4
    def SOS_compute_2(self, l_coeff, S, rho_max=10.):
        prog = MathematicalProgram()
        # fix V and lbda, search for u and rho
        x = prog.NewIndeterminates(2, "x")
        # get lbda from before
        l = np.array([x[1], x[0], 1])
        lbda = l.dot(np.dot(l_coeff, l))

        # Define u
        K = prog.NewContinuousVariables(2, "K")

        # Fixed Lyapunov
        V = x.dot(np.dot(S, x))
        Vdot = Jacobian([V], x).dot(self.dynamics_K(x, K))[0]

        # rho is decision variable now
        rho = prog.NewContinuousVariables(1, "rho")[0]

        prog.AddSosConstraint(-Vdot - lbda * (rho - V))

        prog.AddLinearConstraint(rho <= rho_max)
        prog.AddLinearCost(-rho)
        prog.Solve()
        rho = prog.GetSolution(rho)
        K = prog.GetSolution(K)
        return rho, K
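
Examples 2 to 4 are the three steps of a bilinear alternation for estimating a region of attraction. Below is a hedged sketch of how they might be chained; the method name estimate_roa and the loop structure are my own, while the called signatures are taken from the snippets above (numpy is assumed imported as np, as elsewhere in the class).

    def estimate_roa(self, iters=10):
        # Alternate between the three SOS programs defined above.
        rho = 1e-3                       # start from a small certified level set
        S = np.eye(2)                    # initial quadratic candidate V = x'Sx
        for _ in range(iters):
            l_coeff = self.SOS_compute_1(S, rho)        # fix V, rho: solve for the multiplier
            rho, K = self.SOS_compute_2(l_coeff, S)     # fix V, multiplier: maximize rho over u
            s, rho = self.SOS_compute_3(K, l_coeff)     # fix u, multiplier: refit V and rho
            S = np.array([[s[0], s[1]], [s[2], s[3]]])  # rebuild S from the returned coefficients
        return S, K, rho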
Example #5
def run_simple_mathematical_program():
    print "\n\nsimple mathematical program"
    mp = MathematicalProgram()
    x = mp.NewContinuousVariables(1, "x")
    mp.AddLinearCost(x[0] * 1.0)
    mp.AddLinearConstraint(x[0] >= 1)
    print mp.Solve()
    print mp.GetSolution(x)
    print "(finished) simple mathematical program"
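
For comparison, here is the same one-variable LP written against the newer result-based pydrake API (a minimal sketch, assuming a pydrake version in which Solve returns a MathematicalProgramResult):

from pydrake.all import MathematicalProgram, Solve

def run_simple_mathematical_program_v2():
    print("\n\nsimple mathematical program (result-based API)")
    mp = MathematicalProgram()
    x = mp.NewContinuousVariables(1, "x")
    mp.AddLinearCost(x[0] * 1.0)
    mp.AddLinearConstraint(x[0] >= 1)
    result = Solve(mp)            # returns a MathematicalProgramResult
    print(result.is_success())    # True if an optimum was found
    print(result.GetSolution(x))  # the cost pushes x down to its bound, so this should be [1.]
    print("(finished) simple mathematical program (result-based API)")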
Example #6
    def intercepting_with_obs_avoidance(self,
                                        p0,
                                        v0,
                                        pf,
                                        vf,
                                        T,
                                        obstacles=None,
                                        p_puck=None):
        """Intercepting trajectory that avoids obs"""
        x0 = np.array(np.concatenate((p0, v0), axis=0))
        xf = np.concatenate((pf, vf), axis=0)
        prog = MathematicalProgram()

        # state and control inputs
        N = int(T / self.params.dt)  # number of time steps
        state = prog.NewContinuousVariables(N + 1, 4, 'state')
        cmd = prog.NewContinuousVariables(N, 2, 'input')

        # Initial and final state
        prog.AddLinearConstraint(eq(state[0], x0))
        #prog.AddLinearConstraint(eq(state[-1], xf))
        prog.AddQuadraticErrorCost(Q=10.0 * np.eye(4),
                                   x_desired=xf,
                                   vars=state[-1])

        self.add_dynamics(prog, N, state, cmd)

        ## Input saturation
        self.add_input_limits(prog, cmd, N)

        # Arena constraints
        self.add_arena_limits(prog, state, N)

        for k in range(N):
            prog.AddQuadraticCost(cmd[k].flatten().dot(cmd[k]))

        # Add non-linear constraints - will solve with SNOPT
        # Avoid other players
        if obstacles is not None:
            for obs in obstacles:
                self.avoid_other_player_nl(prog, state, obs, N)

        # avoid hitting the puck while generating a kicking trajectory
        if p_puck is not None and p_puck.any():
            self.avoid_puck_nl(prog, state, N, p_puck)

        solver = SnoptSolver()
        result = solver.Solve(prog)
        solution_found = result.is_success()
        if not solution_found:
            print("Solution not found for intercepting_with_obs_avoidance")

        u_values = result.GetSolution(cmd)
        return solution_found, u_values.T
Example #7
    def ComputeControlInput(self, x, t):
        # Set up things you might want...
        q = x[0:self.nq]
        v = x[self.nq:]

        kinsol = self.hand.doKinematics(x)
        M = self.hand.massMatrix(kinsol)
        C = self.hand.dynamicsBiasTerm(kinsol, {}, None)
        B = self.hand.B

        # Assume grasp points are achieved, and calculate
        # contact jacobian at those points, as well as the current
        # grasp points and normals (in WORLD FRAME).
        grasp_points_world_now = self.transform_grasp_points_manipuland(q)
        grasp_normals_world_now = self.transform_grasp_normals_manipuland(q)
        J_manipuland = jacobian(
            self.transform_grasp_points_manipuland, q)
        ee_points_now = self.transform_grasp_points_fingers(q)
        J_manipulator = jacobian(
            self.transform_grasp_points_fingers, q)

        # The contact jacobian (Phi), for contact forces coming from
        # point contacts, describes how the movement of the contact point
        # maps into the joint coordinates of the robot. We can get this
        # by combining the jacobians derived from forward kinematics to each
        # of the two bodies that are in contact, evaluated at the contact point
        # in each body's frame.
        J_contact = J_manipuland - J_manipulator
        # Cut off the 3rd dimension, which should always be 0
        J_contact = J_contact[0:2, :, :]
        
        normals = grasp_normals_world_now[0:2, :].T

        # Given these, the manipulator equations enter as a linear constraint
        # M qdd + C = Bu + J_contact lambda
        # (the combined effects of lambda on the robot).
        # The list of grasp points, in manipuland frame, that we'll use.
        n_cf = len(self.grasp_points)
        # The evaluated desired manipuland posture.
        manipuland_qdes = self.GetDesiredObjectPosition(t)

        # The desired robot (arm) posture, calculated via inverse kinematics
        # to achieve the desired grasp points on the object in its current
        # target posture.
        qdes, info = self.ComputeTargetPosture(x, manipuland_qdes)
        if info != 1:
            if not self.shut_up:
                print "Warning: target posture IK solve got info %d " \
                      "when computing goal posture at least once during " \
                      "simulation. This means the grasp points were hard to " \
                      "achieve given the current object posture. This is " \
                      "occasionally OK, but indicates that your controller " \
                      "is probably struggling a little." % info
                self.shut_up = True

        # From here, it's up to you. Following the guidelines in the problem
        # set, implement a controller to achieve the specified goals.

        '''
        YOUR CODE HERE
        '''
        # This is not a proper orthogonal vector... but it somehow makes things work,
        # whereas the correct one (commented out below) does not.
        def ortho2(x):
            return np.array([x[0], -x[1]])
            # return np.array([x[1], -x[0]])  # the actual perpendicular vector

        prog = MathematicalProgram()
        
        dt = self.control_period
        
        u = prog.NewContinuousVariables(self.nu, "u")
        
        qdd = prog.NewContinuousVariables(q.shape[0], "qdd")
        for i in range(qdd.shape[0]):
            prog.AddLinearConstraint(qdd[i] <=  40)
            prog.AddLinearConstraint(qdd[i] >= -40)

            #prog.AddLinearConstraint(v[i] + qdd[i] * dt <=   80)
            #prog.AddLinearConstraint(v[i] - qdd[i] * dt >=  -80)
            
        beta = prog.NewContinuousVariables(n_cf, 2, "beta")
        #prog.AddQuadraticCost(0.1*np.sum(beta**2))
        for i in range(n_cf):
            prog.AddLinearConstraint(beta[i,0] >= 0)
            prog.AddLinearConstraint(beta[i,1] >= 0)
            prog.AddLinearConstraint(beta[i,0] <= 10)
            prog.AddLinearConstraint(beta[i,1] <= 10)
            
        c = np.zeros((n_cf,2,2))

        for i in range(n_cf):
            c[i,0] = normals[i] - self.mu * ortho2(normals[i])
            c[i,1] = normals[i] + self.mu * ortho2(normals[i])

        const = M.dot(qdd) + C - B.dot(u)  # manipulator-equation residual; constrained to equal 0 below

        for i in range(n_cf):
            lambda_i = c[i,0]*beta[i,0] + c[i,1]*beta[i,1]
            tmp = J_contact[:, i, :].T.dot(lambda_i)
            const -= tmp

        for i in range(const.shape[0]):
            prog.AddLinearConstraint(const[i] == 0.0)
        
        Kp = 1000
        Kd = 10
        
        #v2 = v + dt * qdd
        #q2 = q + v * dt + 0.5 * qdd * dt*dt
        
        v2 = v + dt * qdd
        q2 = q + (v+v2)/2 * dt #+ 0.5 * qdd * dt*dt

        #v2 = v + dt * qdd
        #q2 = q + v2 * dt
        prog.AddQuadraticCost(Kp * np.sum((qdes - q2) ** 2) + Kd * np.sum(v2**2))
        
        result = prog.Solve()
        u = prog.GetSolution(u)
        return u
Example #8
def quadratic_program(H, f, A, b, C=None, d=None, tol=1.e-5, **kwargs):
    """
    Solves the strictly convex (H > 0) quadratic program min .5 x' H x + f' x s.t. A x <= b, C x  = d.

    Arguments
    ----------
    H : numpy.ndarray
        Positive definite Hessian of the cost function.
    f : numpy.ndarray
        Gradient of the cost function.
    A : numpy.ndarray
        Left-hand side of the inequality constraints.
    b : numpy.ndarray
        Right-hand side of the inequality constraints.
    C : numpy.ndarray
        Left-hand side of the equality constraints.
    d : numpy.ndarray
        Right-hand side of the equality constraints.
    tol : float
        Maximum value of a residual of an inequality to consider the related constraint active.

    Returns
    ----------
    sol : dict
        Dictionary with the solution of the QP.

        Fields
        ----------
        min : float
            Minimum of the QP (None if the problem is infeasible).
        argmin : numpy.ndarray
            Argument that minimizes the QP (None if the problem is infeasible).
        active_set : list of int
            Indices of the active inequalities {i | A_i argmin = b_i} (None if the problem is infeasible).
        multiplier_inequality : numpy.ndarray
            Lagrange multipliers for the inequality constraints (None if the problem is infeasible).
        multiplier_equality : numpy.ndarray
            Lagrange multipliers for the equality constraints (None if the problem is infeasible or has no equality constraints).
    """

    # check equalities
    if (C is None) != (d is None):
        raise ValueError('missing C or d.')

    # problem size
    n_ineq, n_x = A.shape
    if C is not None:
        n_eq = C.shape[0]
    else:
        n_eq = 0

    # build program
    prog = MathematicalProgram()
    x = prog.NewContinuousVariables(n_x)
    [prog.AddLinearConstraint(A[i].dot(x) <= b[i]) for i in range(n_ineq)]
    [prog.AddLinearConstraint(C[i].dot(x) == d[i]) for i in range(n_eq)]
    inequalities = []
    prog.AddQuadraticCost(.5*x.dot(H).dot(x) + f.dot(x))

    # solve
    solver = GurobiSolver()
    prog.SetSolverOption(solver.solver_type(), 'OutputFlag', 0)
    [prog.SetSolverOption(solver.solver_type(), parameter, value) for parameter, value in kwargs.items()]
    result = prog.Solve()

    # initialize output
    sol = {
        'min': None,
        'argmin': None,
        'active_set': None,
        'multiplier_inequality': None,
        'multiplier_equality': None
    }

    if result == SolutionResult.kSolutionFound:
        sol['argmin'] = prog.GetSolution(x)
        sol['min'] = .5*sol['argmin'].dot(H).dot(sol['argmin']) + f.dot(sol['argmin'])
        sol['active_set'] = np.where(A.dot(sol['argmin']) - b > -tol)[0].tolist()

        # retrieve multipliers through KKT conditions
        lhs = A[sol['active_set']]
        rhs = b[sol['active_set']]
        if n_eq > 0:
            lhs = np.vstack((lhs, C))
            rhs = np.concatenate((rhs, d))
        H_inv = np.linalg.inv(H)
        M = lhs.dot(H_inv).dot(lhs.T)
        m = - np.linalg.inv(M).dot(lhs.dot(H_inv).dot(f) + rhs)
        sol['multiplier_inequality'] = np.zeros(n_ineq)
        for i, j in enumerate(sol['active_set']):
            sol['multiplier_inequality'][j] = m[i]
        if n_eq > 0:
            sol['multiplier_equality'] = m[len(sol['active_set']):]

    return sol
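
A quick usage sketch with made-up data (it assumes a pydrake build with Gurobi and the older Solve/GetSolution API that the helper above relies on): minimize .5 x'x - [1, 1]'x subject to x <= 1 componentwise, whose unconstrained minimizer [1, 1] lands exactly on both inequalities.

import numpy as np

H = np.eye(2)                 # positive definite Hessian
f = np.array([-1., -1.])      # linear cost term
A = np.eye(2)                 # inequality left-hand side: x <= 1 componentwise
b = np.ones(2)                # inequality right-hand side
sol = quadratic_program(H, f, A, b)
print(sol['argmin'])          # expected near [1., 1.]
print(sol['min'])             # expected near -1.0
print(sol['active_set'])      # both inequalities are tight, so expected [0, 1]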
Example #9
    def compute_trajectory(self,
                           obst_idx,
                           x_out,
                           y_out,
                           ux_out,
                           uy_out,
                           pose_initial=[0., 0., 0., 0.],
                           dt=0.05):
        '''
        Find trajectory with MILP.
        Input u are the velocities (xd, yd).
        dt = 0.05 corresponds to a rate of 20 Hz.
        '''
        mp = MathematicalProgram()
        N = 30
        k = 0
        # define input trajectory and state traj
        u = mp.NewContinuousVariables(2, "u_%d" % k)  # xd yd
        input_trajectory = u
        st = mp.NewContinuousVariables(4, "state_%d" % k)
        # binary variables for the obstacle constraints
        c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
        obs = c
        states = st
        for k in range(1, N):
            u = mp.NewContinuousVariables(2, "u_%d" % k)
            input_trajectory = np.vstack((input_trajectory, u))
            st = mp.NewContinuousVariables(4, "state_%d" % k)
            states = np.vstack((states, st))
            c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
            obs = np.vstack((obs, c))
        st = mp.NewContinuousVariables(4, "state_%d" % N)
        states = np.vstack((states, st))
        c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % N)
        obs = np.vstack((obs, c))
        ### define cost
        mp.AddLinearCost(100 *
                         (-states[-1, 0]))  # go as far forward as possible
        # mp.AddQuadraticCost(states[-1,1]*states[-1,1])
        # time constraint
        M = 1000  # big-M constant for the obstacle constraints
        # state constraint
        for i in range(2):  # initial position constraint (x, y)
            mp.AddLinearConstraint(states[0, i] <= pose_initial[i])
            mp.AddLinearConstraint(states[0, i] >= pose_initial[i])
        for i in range(2):  # initial velocity constraint (xd, yd), within +/- 1
            mp.AddLinearConstraint(states[0, 2 + i] <= pose_initial[2 + i] + 1)
            mp.AddLinearConstraint(states[0, 2 + i] >= pose_initial[2 + i] - 1)
        for i in range(N):
            # state update according to dynamics
            state_next = self.quad_dynamics(states[i, :],
                                            input_trajectory[i, :], dt)
            for j in range(4):
                mp.AddLinearConstraint(states[i + 1, j] <= state_next[j])
                mp.AddLinearConstraint(states[i + 1, j] >= state_next[j])
            # obstacle constraint
            for j in range(self.ang_discret - 1):
                mp.AddLinearConstraint(sum(obs[i, 4 * j:4 * j + 4]) <= 3)
                ang_min = self.angles[j]  # lower angle bound of obstacle
                ang_max = self.angles[j + 1]  # upper angle bound of the obstacle
                if int(obst_idx[j]) < (self.rng_discret -
                                       1):  # less than max range measured
                    rng_min = self.ranges[int(
                        obst_idx[j])]  # range at which the obstacle starts at this angle
                    rng_max = self.ranges[int(obst_idx[j] + 1)]
                    mp.AddLinearConstraint(
                        states[i, 0] <= rng_min - 0.05 +
                        M * obs[i, 4 * j])  # xi <= xobs,low + M*c
                    mp.AddLinearConstraint(
                        states[i, 0] >= rng_max + 0.005 -
                        M * obs[i, 4 * j + 1])  # xi >= xobs,high - M*c
                    mp.AddLinearConstraint(
                        states[i, 1] <= states[i, 0] * np.tan(ang_min) - 0.05 +
                        M * obs[i, 4 * j + 2])  # yi <= xi*tan(ang,min) + M*c
                    mp.AddLinearConstraint(
                        states[i, 1] >= states[i, 0] * np.tan(ang_max) + 0.05 -
                        M * obs[i, 4 * j + 3])  # yi >= xi*tan(ang_max) - M*c
            # environment constraint: don't leave the field of view
            mp.AddLinearConstraint(
                states[i, 1] >= states[i, 0] * np.tan(-self.theta))
            mp.AddLinearConstraint(
                states[i, 1] <= states[i, 0] * np.tan(self.theta))
            # bound the inputs
            # mp.AddConstraint(input_trajectory[i,:].dot(input_trajectory[i,:]) <= 2.5*2.5)  # doesn't work with the mixed-integer solver
            mp.AddLinearConstraint(input_trajectory[i, 0] <= 2.5)
            mp.AddLinearConstraint(input_trajectory[i, 0] >= -2.5)
            mp.AddLinearConstraint(input_trajectory[i, 1] <= 0.5)
            mp.AddLinearConstraint(input_trajectory[i, 1] >= -0.5)

        mp.Solve()

        input_trajectory = mp.GetSolution(input_trajectory)
        trajectory = mp.GetSolution(states)
        x_out[:] = trajectory[:, 0]
        y_out[:] = trajectory[:, 1]
        ux_out[:] = input_trajectory[:, 0]
        uy_out[:] = input_trajectory[:, 1]
        return trajectory, input_trajectory
Example #10
        def findLyapunovFunctionSOS(self, xtraj, utraj, deg_V, deg_L):
            times = xtraj.get_segment_times()

            prog = MathematicalProgram()

            # Declare the "indeterminates", x. These are the variables which define the polynomials
            x = prog.NewIndeterminates(3, 'x')
            ucon = prog.NewIndeterminates(2, 'u')
            sym_system = self.ToSymbolic()
            sym_context = sym_system.CreateDefaultContext()
            sym_context.SetContinuousState(x)
            sym_context.FixInputPort(0, ucon)
            f = sym_system.EvalTimeDerivativesTaylor(
                sym_context).CopyToVector()  # - dztrajdt.value(t).transpose()

            for t in times:
                x0 = xtraj.value(t).transpose()[0]
                u0 = utraj.value(t).transpose()[0]

                f_fb = self.EvalClosedLoopDynamics(x, ucon, f, x0, u0)

                # Construct a polynomial V that contains all monomials in x up to degree deg_V.
                V = prog.NewFreePolynomial(Variables(x), deg_V).ToExpression()
                eps = 1e-4
                constraint1 = prog.AddSosConstraint(
                    V - eps * (x - x0).dot(x - x0)
                )  # constraint to enforce that V is strictly positive away from x0.
                Vdot = V.Jacobian(x).dot(
                    f_fb
                )  # Construct the polynomial which is the time derivative of V
                #L = prog.NewFreePolynomial(Variables(x), deg_L).ToExpression() # Construct a polynomial L representing the
                # "Lagrange multiplier".
                # Add a constraint that Vdot is strictly negative away from x0
                constraint2 = prog.AddSosConstraint(-Vdot[0] - eps *
                                                    (x - x0).dot(x - x0))
                #import pdb; pdb.set_trace()
                # Fix the value of V at the fixed point (pinned to 1.0 here).
                constraint3 = prog.AddLinearConstraint(
                    V.Substitute({
                        x[0]: 0.0,
                        x[1]: 1.0,
                        x[2]: 0.0
                    }) == 1.0)
                # Add V(theta=xxx) = 1, just to set the scale.
                constraint4 = prog.AddLinearConstraint(
                    V.Substitute({
                        x[0]: 1.0,
                        x[1]: 0.0,
                        x[2]: 0.0
                    }) == 1.0)
                # Call the solver (default).
                #result = Solve(prog)
                # Call the solver (specific).
                #solver = CsdpSolver()
                #solver = ScsSolver()
                #solver = IpoptSolver()
                #result = solver.Solve(prog, None, None)
                result = Solve(prog)
                # print out the result.
                print("Success? ", result.is_success())
                print(result.get_solver_id().name())

                Vsol = Polynomial(result.GetSolution(V))

                print('Time t=%f:\nV=' % (t))
                print(Vsol.RemoveTermsWithSmallCoefficients(1e-6))

            return Vsol, Vsol.Jacobian(x).dot(f_fb)
Example #11
    def compute_trajectory(self,
                           state_initial,
                           goal_state,
                           flight_time,
                           exact=False,
                           verbose=True):
        '''
        nonlinear trajectory optimization to land drone starting at state_initial, to a goal_state, in a specific flight_time

        :return: three return args separated by commas:

            trajectory, input_trajectory, time_array

            trajectory: a 2d array with N rows and 6 columns. See simulate_states_over_time for more documentation.
            input_trajectory: a 2d array with N-1 rows and 4 columns. See simulate_states_over_time for more documentation.
            time_array: an array with N rows.

        '''
        # initialize math program
        import time
        start_time = time.time()
        mp = MathematicalProgram()
        num_time_steps = int(min(40, flight_time / 0.05))
        dt = flight_time / num_time_steps
        time_array = np.arange(0.0, flight_time - 0.00001,
                               dt)  # small epsilon keeps arange from including the endpoint
        num_time_steps = len(time_array)  # to ensure these are equal lengths
        flight_time = dt * num_time_steps

        if verbose:
            print ''
            print 'solving problem with no guess'
            print 'initial state', state_initial
            print 'goal state', goal_state
            print 'flight time', flight_time
            print 'num time steps', num_time_steps
            print 'exact traj', exact
            print 'dt', dt

        booster = mp.NewContinuousVariables(3, "booster_0")
        booster_over_time = booster[np.newaxis, :]

        state = mp.NewContinuousVariables(6, "state_0")
        state_over_time = state[np.newaxis, :]

        for k in range(1, num_time_steps - 1):
            booster = mp.NewContinuousVariables(3, "booster_%d" % k)
            booster_over_time = np.vstack((booster_over_time, booster))
        for k in range(1, num_time_steps):
            state = mp.NewContinuousVariables(6, "state_%d" % k)
            state_over_time = np.vstack((state_over_time, state))

        # calculate states over time
        for i in range(1, len(time_array)):
            time_step = time_array[i] - time_array[i - 1]
            sim_next_state = state_over_time[
                i - 1, :] + time_step * self.drone_dynamics(
                    state_over_time[i - 1, :], booster_over_time[i - 1, :])

            # add constraints to restrict the next state to the decision variables
            for j in range(6):
                mp.AddLinearConstraint(sim_next_state[j] == state_over_time[i,
                                                                            j])

            # don't hit ground
            mp.AddLinearConstraint(state_over_time[i, 2] >= 0.05)

            # enforce that we must be thrusting within some constraints
            mp.AddLinearConstraint(booster_over_time[i - 1, 0] <= 5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 0] >= -5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 1] <= 5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 1] >= -5.0)

            # keep forces in a reasonable position
            mp.AddLinearConstraint(booster_over_time[i - 1, 2] >= 1.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 2] <= 15.0)
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 0] <= booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 0] >= -booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 1] <= booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 1] >= -booster_over_time[i - 1, 2])

        # add constraints on initial state
        for i in range(6):
            mp.AddLinearConstraint(state_over_time[0, i] == state_initial[i])

        # add constraints on end state
        # 100 should be a lower constant...
        mp.AddLinearConstraint(
            state_over_time[-1, 0] <= goal_state[0] + flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 0] >= goal_state[0] - flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 1] <= goal_state[1] + flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 1] >= goal_state[1] - flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 2] <= goal_state[2] + flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 2] >= goal_state[2] - flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 3] <= goal_state[3] + flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 3] >= goal_state[3] - flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 4] <= goal_state[4] + flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 4] >= goal_state[4] - flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 5] <= goal_state[5] + flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 5] >= goal_state[5] - flight_time / 100.)

        # add the cost function
        mp.AddQuadraticCost(
            10. * booster_over_time[:, 0].dot(booster_over_time[:, 0]))
        mp.AddQuadraticCost(
            10. * booster_over_time[:, 1].dot(booster_over_time[:, 1]))
        mp.AddQuadraticCost(
            10. * booster_over_time[:, 2].dot(booster_over_time[:, 2]))

        for i in range(1, len(time_array) - 1):
            cost_multiplier = np.exp(
                4.5 * i / (len(time_array) -
                           1))  # exp starting at 1 and going to around 90
            # penalize difference_in_state
            dist_to_goal_pos = goal_state[:3] - state_over_time[i - 1, :3]
            mp.AddQuadraticCost(cost_multiplier *
                                dist_to_goal_pos.dot(dist_to_goal_pos))
            # penalize difference in velocity
            if exact:
                dist_to_goal_vel = goal_state[-3:] - state_over_time[i - 1,
                                                                     -3:]
                mp.AddQuadraticCost(cost_multiplier / 2. *
                                    dist_to_goal_vel.dot(dist_to_goal_vel))
            else:
                dist_to_goal_vel = goal_state[-3:] - state_over_time[i - 1,
                                                                     -3:]
                mp.AddQuadraticCost(cost_multiplier / 6. *
                                    dist_to_goal_vel.dot(dist_to_goal_vel))

        solved = mp.Solve()
        if verbose:
            print solved

        # extract
        booster_over_time = mp.GetSolution(booster_over_time)

        output_states = mp.GetSolution(state_over_time)

        durations = time_array[1:len(time_array
                                     )] - time_array[0:len(time_array) - 1]
        fuel_consumption = (
            np.sum(booster_over_time[:len(time_array)]**2, axis=1) *
            durations).sum()
        if verbose:
            print 'expected remaining fuel consumption', fuel_consumption
            print("took %s seconds" % (time.time() - start_time))
            print 'goal state', goal_state
            print 'end state', output_states[-1]
            print ''

        return output_states, booster_over_time, time_array
Example #12
import numpy as np
from pydrake.all import Quaternion
from pydrake.all import MathematicalProgram, Solve, eq, le, ge, SolverOptions, SnoptSolver
import pdb

prog = MathematicalProgram()
x = prog.NewContinuousVariables(rows=1, name='x')
y = prog.NewContinuousVariables(rows=1, name='y')
slack = prog.NewContinuousVariables(rows=1, name="slack")

prog.AddConstraint(eq(x * y, slack))
prog.AddLinearConstraint(ge(x, 0))
prog.AddLinearConstraint(ge(y, 0))
prog.AddLinearConstraint(le(x, 1))
prog.AddLinearConstraint(le(y, 1))
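# The heavy quadratic cost below penalizes slack (= x*y via the constraint above),
# acting as a soft complementarity condition between x and y.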
prog.AddCost(1e5 * (slack**2)[0])
prog.AddCost(-(2 * x[0] + y[0]))

solver = SnoptSolver()
result = solver.Solve(prog)
print(
    f"Success: {result.is_success()}, x = {result.GetSolution(x)}, y = {result.GetSolution(y)}, slack = {result.GetSolution(slack)}"
)
Example #13
    def PlanGraspPoints(self):
        # First, extract the bounding geometry of the object.
        # Generally, our geometry is coming from 3d models, so
        # we have to do some legwork to extract 2D geometry. For
        # the examples we'll use in this set, we'll assume
        # that extracting the convex hull of the first visual element
        # is a good representation of the object geometry. (This is
        # not great! But it'll do the job for us, since we're going
        # to experiment with only simple objects.)
        kinsol = self.hand.doKinematics(
            self.x_nom[0:self.hand.get_num_positions()])
        self.manipuland_link_index = \
            self.hand.FindBody(self.manipuland_link_name).get_body_index()
        body = self.hand.get_body(self.manipuland_link_index)
        # For projecting into XY plane
        Tview = np.array([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 0.,
                                                               1.]])
        all_points = ExtractPlanarObjectGeometryConvexHull(body, Tview)

        # For later use: precompute the fingertip positions of the
        # robot in the nominal posture.
        nominal_fingertip_points = np.empty((2, self.num_fingers))
        for i in range(self.num_fingers):
            nominal_fingertip_points[:, i] = self.hand.transformPoints(
                kinsol, self.fingertip_position, self.finger_link_indices[i],
                0)[0:2, 0]

        # Search for an optimal grasp with N points
        random.seed(42)
        np.random.seed(42)

        def get_random_point_and_normal_on_surface(all_points):
            num_points = all_points.shape[1]
            i = random.choice(range(num_points - 1))
            first_point = np.asarray([all_points[0][i], all_points[1][i]])
            second_point = np.asarray(
                [all_points[0][i + 1], all_points[1][i + 1]])
            first_to_second = second_point - first_point
            # Clip to avoid interpolating close to a corner
            interpolate_param = np.clip(np.random.rand(), 0.2, 0.8)
            rand_point = first_point + interpolate_param * first_to_second

            normal = np.array([-first_to_second[1], first_to_second[0]]) \
                / np.linalg.norm(first_to_second)
            return rand_point, normal

        best_conv_volume = 0
        best_points = []
        best_normals = []
        best_finger_assignments = []
        for i in range(self.n_grasp_search_iters):
            grasp_points = []
            normals = []
            for j in range(self.num_fingers):
                point, normal = \
                    get_random_point_and_normal_on_surface(all_points)
                # check for duplicates or close points -- fingertip
                # radius is 0.2, so require twice that margin to avoid
                # intersecting fingertips.
                num_rejected = 0
                while min([1.0] + [
                        np.linalg.norm(grasp_point - point, 2)
                        for grasp_point in grasp_points
                ]) <= 0.4:
                    point, normal = \
                        get_random_point_and_normal_on_surface(all_points)
                    num_rejected += 1
                    if num_rejected > 10000:
                        print "Rejected 10000 points in a row due to crowding." \
                              " Your object is a bit too small for your number of" \
                              " fingers."
                        break
                grasp_points.append(point)
                normals.append(normal)
            if achieves_force_closure(grasp_points, normals, self.mu):
                # Test #1: Rank in terms of convex hull volume of grasp points
                volume = compute_convex_hull_volume(grasp_points)
                if volume < best_conv_volume:
                    continue

                # Test #2: Does IK work for this point?
                self.grasp_points = grasp_points
                self.grasp_normals = normals

                # Pick optimal finger assignment that
                # minimizes distance between fingertips in the
                # nominal posture, and the chosen grasp points
                grasp_points_world = self.transform_grasp_points_manipuland(
                    self.x_nom)[0:2, :]

                prog = MathematicalProgram()
                # We'd *really* like these to be binary variables,
                # but unfortunately don't have a MIP solver available in the
                # course docker container. Instead, we'll solve an LP,
                # and round the result to the nearest feasible integer
                # solutions. Intuitively, this LP attains its optimum at an
                # extreme point (where the variables all take integer values);
                # in fact the assignment polytope's constraint matrix is
                # totally unimodular, so its vertices are integral. I've not
                # yet observed this failing in practice!
                assignment_vars = prog.NewContinuousVariables(
                    self.num_fingers, self.num_fingers, "assignment")
                for grasp_i in range(self.num_fingers):
                    # Every row and column of assignment vars sum to one --
                    # each finger matches to one grasp position
                    prog.AddLinearConstraint(
                        np.sum(assignment_vars[:, grasp_i]) == 1.)
                    prog.AddLinearConstraint(
                        np.sum(assignment_vars[grasp_i, :]) == 1.)
                    for finger_i in range(self.num_fingers):
                        # If this grasp assignment is active,
                        # add a cost equal to the distance between
                        # nominal pose and grasp position
                        prog.AddLinearCost(
                            assignment_vars[grasp_i, finger_i] *
                            np.linalg.norm(grasp_points_world[:, grasp_i] -
                                           nominal_fingertip_points[:,
                                                                    finger_i]))
                        prog.AddBoundingBoxConstraint(
                            0., 1., assignment_vars[grasp_i, finger_i])
                result = Solve(prog)
                assignments = result.GetSolution(assignment_vars)
                # Round assignments to nearest feasible set
                claimed = [False] * self.num_fingers
                for grasp_i in range(self.num_fingers):
                    order = np.argsort(assignments[grasp_i, :])
                    fill_i = self.num_fingers - 1
                    while claimed[order[fill_i]] is not False:
                        fill_i -= 1
                    if fill_i < 0:
                        raise Exception("Finger association failed. "
                                        "Horrible bug. Tell Greg")
                    assignments[grasp_i, :] *= 0.
                    assignments[grasp_i, order[fill_i]] = 1.
                    claimed[order[fill_i]] = True

                # Populate actual assignments
                self.grasp_finger_assignments = []
                for grasp_i in range(self.num_fingers):
                    for finger_i in range(self.num_fingers):
                        if assignments[grasp_i, finger_i] == 1.:
                            self.grasp_finger_assignments.append(
                                (finger_i, np.array(self.fingertip_position)))
                            continue

                qinit, info = self.ComputeTargetPosture(
                    self.x_nom,
                    self.x_nom[(self.nq - 3):self.nq],
                    backoff_distance=0.2)
                if info != 1:
                    continue

                best_conv_volume = volume
                best_points = grasp_points
                best_normals = normals
                best_finger_assignments = self.grasp_finger_assignments

        if len(best_points) == 0:
            print "After %d attempts, couldn't find a good grasp "\
                  "for this object." % self.n_grasp_search_iters
            print "Proceeding with a horrible random guess."
            best_points = [
                np.random.uniform(-1., 1., 2) for i in range(self.num_fingers)
            ]
            best_normals = [
                np.random.uniform(-1., 1., 2) for i in range(self.num_fingers)
            ]
            best_finger_assignments = [(i, self.fingertip_position)
                                       for i in range(self.num_fingers)]

        self.grasp_points = best_points
        self.grasp_normals = best_normals
        self.grasp_finger_assignments = best_finger_assignments
Example #14
    def intercepting_with_obs_avoidance_bb(self,
                                           p0,
                                           v0,
                                           pf,
                                           vf,
                                           T,
                                           obstacles=None,
                                           p_puck=None):
        """kick while avoiding obstacles using big M formulation and branch and bound"""
        x0 = np.array(np.concatenate((p0, v0), axis=0))
        xf = np.concatenate((pf, vf), axis=0)
        prog = MathematicalProgram()

        # state and control inputs
        N = int(T / self.params.dt)  # number of command steps
        state = prog.NewContinuousVariables(N + 1, 4, 'state')
        cmd = prog.NewContinuousVariables(N, 2, 'input')

        # Initial and final state
        prog.AddLinearConstraint(eq(state[0], x0))
        prog.AddLinearConstraint(eq(state[-1], xf))

        self.add_dynamics(prog, N, state, cmd)

        ## Input saturation
        self.add_input_limits(prog, cmd, N)

        # Arena constraints
        self.add_arena_limits(prog, state, N)

        # Add Mixed-integer constraints - will solve with BB

        # avoid hitting the puck while generating a kicking trajectory
        # MILP formulation with B&B solver
        x_obs_puck = prog.NewBinaryVariables(rows=N + 1,
                                             cols=2)  # obs x_min, obs x_max
        y_obs_puck = prog.NewBinaryVariables(rows=N + 1,
                                             cols=2)  # obs y_min, obs y_max
        self.avoid_puck_bigm(prog, x_obs_puck, y_obs_puck, state, N, p_puck)

        # Avoid other players
        if obstacles is not None:
            x_obs_player = list()
            y_obs_player = list()
            for i, obs in enumerate(obstacles):
                x_obs_player.append(prog.NewBinaryVariables(
                    rows=N + 1, cols=2))  # obs x_min, obs x_max
                y_obs_player.append(prog.NewBinaryVariables(
                    rows=N + 1, cols=2))  # obs y_min, obs y_max
                self.avoid_other_player_bigm(prog, x_obs_player[i],
                                             y_obs_player[i], state, obs, N)

        # Solve with simple b&b solver
        for k in range(N):
            prog.AddQuadraticCost(cmd[k].flatten().dot(cmd[k]))

        bb = branch_and_bound.MixedIntegerBranchAndBound(
            prog,
            OsqpSolver().solver_id())
        result = bb.Solve()
        if result != result.kSolutionFound:
            raise ValueError('Infeasible optimization problem.')
        u_values = np.array([bb.GetSolution(u) for u in cmd]).T
        solution_found = result.kSolutionFound
        return solution_found, u_values
Example #15
def achieves_force_closure(points, normals, mu, debug=False):
    """
    Determines whether the given forces achieve force closure.

    Note that each of points and normals are lists of variable 
    length, but should be the same length.

    Here's an example valid set of input args:
        points  = [np.asarray([0.1, 0.2]), np.asarray([0.5,-0.1])]
        normals = [np.asarray([-1.0,0.0]), np.asarray([0.0, 1.0])]
        mu = 0.2
        achieves_force_closure(points, normals, mu)

    NOTE: your normals should be normalized (be unit vectors)

    :param points: a list of arrays where each array is the
        position of the force points relative to the center of mass.
    :type points: a list of 1-d numpy arrays of shape (2,)

    :param normals: a list of arrays where each array is the normal
        of the local surface of the object, pointing in towards
        the object
    :type normals: a list of 1-d numpy arrays of shape (2,)

    :param mu: coulombic kinetic friction coefficient
    :type mu: float, greater than 0.

    :return: whether or not the given parameters achieves force closure
    :rtype: bool (True or False)
    """
    assert len(points) == len(normals)
    assert mu >= 0.0
    assert_unit_normals(normals)

    ## YOUR CODE HERE
    G = get_G(points, normals)
    if not is_full_row_rank(G):
        print("G is not full row rank")
        return False

    N_points = len(points)
    max_force = 1000

    mp = MathematicalProgram()

    ## Decision vars
    # force tangent to surface
    forces_x = mp.NewContinuousVariables(N_points, "forces_x")
    # force normal to surface
    forces_z = mp.NewContinuousVariables(N_points, "forces_z")
    # -1<=slack_var<=0 used to convert inequalities to strict inequalities
    slack_var = mp.NewContinuousVariables(1, "slack_var")

    # put force vars in single array
    forces = [None] * 2 * N_points
    for point_idx in range(N_points):
        forces[point_idx * 2] = forces_x[point_idx]
        forces[point_idx * 2 + 1] = forces_z[point_idx]

    # ensure forces/moments add to zero
    for row_idx in range(np.shape(G)[0]):
        # 3 rows (LMB_x, LMB_z, AMB) = 0 for static system
        mp.AddLinearConstraint(G[row_idx, :].dot(forces) == 0)
        if debug:
            print("Static force/moment constraints = 0:")
            print(G[row_idx, :].dot(forces))

    # ensure forces stay within friction cone and use slack to avoid trivial 0 solution
    for point_idx in range(N_points):
        # normal force must be positive (slack allows us to catch if it's zero)
        mp.AddLinearConstraint(forces_z[point_idx] + slack_var[0] >= 0)
        mp.AddLinearConstraint(
            forces_x[point_idx] <= mu * forces_z[point_idx] + slack_var[0])
        mp.AddLinearConstraint(
            forces_x[point_idx] >= -(mu * forces_z[point_idx] + slack_var[0]))

    # restrict slack
    mp.AddLinearConstraint(slack_var[0] <= 0)
    mp.AddLinearConstraint(slack_var[0] >= -1)
    mp.AddLinearCost(slack_var[0])

    # restrict solution within bounds
    for force in forces:
        mp.AddLinearConstraint(force >= -max_force)
        mp.AddLinearConstraint(force <= max_force)

    result = Solve(mp)
    if (not result.is_success()):
        print("solver failed to find solution")
        return False

    gamma = result.GetSolution(slack_var)
    if (gamma < 0):
        print("achieved force closure, gamma = {}".format(gamma))
        if debug:
            x = result.GetSolution(forces_x)
            z = result.GetSolution(forces_z)
            print("solution forces_x: {}".format(x))
            print("solution forces_z: {}".format(z))
        return True
    else:
        print("only trivial solution with 0 forces found")
        return False
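
Usage sketch, reusing the example arguments from the docstring above (it assumes numpy is imported as np and that the helpers get_G, is_full_row_rank and assert_unit_normals from the same module are available):

points = [np.asarray([0.1, 0.2]), np.asarray([0.5, -0.1])]
normals = [np.asarray([-1.0, 0.0]), np.asarray([0.0, 1.0])]
print(achieves_force_closure(points, normals, mu=0.2, debug=True))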
Example #16
    def solve(self, quad_start_q, quad_final_q, time_used):
        mp = MathematicalProgram()

        # We want to solve this for a certain number of knot points
        N = 40  # num knot points
        time_increment = time_used / (N + 1)
        dt = time_increment
        time_array = np.arange(0.0, time_used, time_increment)

        quad_u = mp.NewContinuousVariables(2, "u_0")
        quad_q = mp.NewContinuousVariables(6, "quad_q_0")

        for i in range(1, N):
            u = mp.NewContinuousVariables(2, "u_%d" % i)
            quad = mp.NewContinuousVariables(6, "quad_q_%d" % i)

            quad_u = np.vstack((quad_u, u))
            quad_q = np.vstack((quad_q, quad))

        for i in range(N):
            mp.AddLinearConstraint(quad_u[i][0] <= 3.0)  # force
            mp.AddLinearConstraint(quad_u[i][0] >= 0.0)  # force
            mp.AddLinearConstraint(quad_u[i][1] <= 10.0)  # torque
            mp.AddLinearConstraint(quad_u[i][1] >= -10.0)  # torque

            mp.AddLinearConstraint(quad_q[i][0] <= 1000.0)  # pos x
            mp.AddLinearConstraint(quad_q[i][0] >= -1000.0)
            mp.AddLinearConstraint(quad_q[i][1] <= 1000.0)  # pos y
            mp.AddLinearConstraint(quad_q[i][1] >= -1000.0)
            mp.AddLinearConstraint(
                quad_q[i][2] <= 60.0 * np.pi / 180.0)  # pos theta
            mp.AddLinearConstraint(quad_q[i][2] >= -60.0 * np.pi / 180.0)
            mp.AddLinearConstraint(quad_q[i][3] <= 1000.0)  # vel x
            mp.AddLinearConstraint(quad_q[i][3] >= -1000.0)
            mp.AddLinearConstraint(quad_q[i][4] <= 1000.0)  # vel y
            mp.AddLinearConstraint(quad_q[i][4] >= -1000.0)
            mp.AddLinearConstraint(quad_q[i][5] <= 10.0)  # vel theta
            mp.AddLinearConstraint(quad_q[i][5] >= -10.0)

        for i in range(1, N):
            quad_q_dyn_feasible = self.dynamics(quad_q[i - 1, :],
                                                quad_u[i - 1, :], dt)

            # Direct transcription constraints on states to dynamics
            for j in range(6):
                quad_state_err = (quad_q[i][j] - quad_q_dyn_feasible[j])
                eps = 0.001
                mp.AddConstraint(quad_state_err <= eps)
                mp.AddConstraint(quad_state_err >= -eps)

        # Initial and final quad and ball states
        for j in range(6):
            mp.AddLinearConstraint(quad_q[0][j] == quad_start_q[j])
            mp.AddLinearConstraint(quad_q[-1][j] == quad_final_q[j])

        # Quadratic cost on the control input
        R_force = 10.0
        R_torque = 100.0
        Q_quad_x = 100.0
        Q_quad_y = 100.0
        mp.AddQuadraticCost(R_force * quad_u[:, 0].dot(quad_u[:, 0]))
        mp.AddQuadraticCost(R_torque * quad_u[:, 1].dot(quad_u[:, 1]))
        mp.AddQuadraticCost(Q_quad_x * quad_q[:, 0].dot(quad_q[:, 0]))
        mp.AddQuadraticCost(Q_quad_y * quad_q[:, 1].dot(quad_q[:, 1]))

        # Solve the optimization
        print "Number of decision vars: ", mp.num_vars()

        # mp.SetSolverOption(SolverType.kSnopt, "Major iterations limit", 100000)

        print "Solve: ", mp.Solve()

        quad_traj = mp.GetSolution(quad_q)
        input_traj = mp.GetSolution(quad_u)

        return (quad_traj, input_traj, time_array)
Example #17
def achieves_force_closure(points, normals, mu):
    """
    Determines whether the given forces achieve force closure.

    Note that each of points and normals are lists of variable 
    length, but should be the same length.

    Here's an example valid set of input args:
        points  = [np.asarray([0.1, 0.2]), np.asarray([0.5,-0.1])]
        normals = [np.asarray([-1.0,0.0]), np.asarray([0.0, 1.0])]
        mu = 0.2
        achieves_force_closure(points, normals, mu)

    NOTE: your normals should be normalized (be unit vectors)

    :param points: a list of arrays where each array is the
        position of the force points relative to the center of mass.
    :type points: a list of 1-d numpy arrays of shape (2,)

    :param normals: a list of arrays where each array is the normal
        of the local surface of the object, pointing in towards
        the object
    :type normals: a list of 1-d numpy arrays of shape (2,)

    :param mu: coulombic kinetic friction coefficient
    :type mu: float, greater than 0.

    :return: whether or not the given parameters achieves force closure
    :rtype: bool (True or False)
    """
    assert len(points) == len(normals)
    assert mu >= 0.0

    # print('Number of Points: ' + str(len(points)))

    from pydrake.all import MathematicalProgram, Solve
    mp = MathematicalProgram()

    forces = mp.NewContinuousVariables(2 * len(points), "forces")
    y = mp.NewContinuousVariables(1, "y")

    g = get_G(points, normals)
    # print('Is Full Row Rank? => ' + str(is_full_row_rank(g)))
    if (not is_full_row_rank(g)):
        return False

    w = g.dot(forces)

    mp.AddLinearConstraint(w[0] == 0)
    mp.AddLinearConstraint(w[1] == 0)
    mp.AddLinearConstraint(w[2] == 0)
    #     mp.AddLinearEqualityConstraint(g, np.zeros((3, 1)), forces)

    mp.AddLinearConstraint(y[0] >= -1)
    mp.AddLinearConstraint(y[0] <= 0)

    for i in range(len(points)):
        mp.AddLinearConstraint(mu * forces[2 * i + 1] >= forces[2 * i])
        mp.AddLinearConstraint(mu * forces[2 * i + 1] >= -forces[2 * i])
        mp.AddLinearConstraint(forces[2 * i + 1] >= -y[0])

    mp.AddLinearCost(y[0])
    result = Solve(mp)
    forces_solution = result.GetSolution(forces)
    y_solution = result.GetSolution(y)

    # print('forces_solution: ' + str(forces_solution))
    # print('w: ' + str(g.dot(forces_solution)))
    # print('y_solution: ' + str(y_solution))

    if y_solution == 0:
        return False
    else:
        return True
Example #18
    def compute_control(self, x_p1, x_p2, x_puck, p_goal, obstacles):
        """Solve for initial velocity for the puck to bounce the wall once and hit the goal."""
        """Currently does not work"""
        # initialize program
        N = self.mpc_params.N  # length of receding horizon
        prog = MathematicalProgram()

        # State and input variables
        x1 = prog.NewContinuousVariables(N + 1, 4,
                                         name='p1_state')  # state of player 1
        u1 = prog.NewContinuousVariables(
            N, 2, name='p1_input')  # inputs for player 1
        xp = prog.NewContinuousVariables(
            N + 1, 4, name='puck_state')  # state of the puck

        # Slack variables
        t1_kick = prog.NewContinuousVariables(
            N + 1, name='kick_time'
        )  # slack variables that captures if player 1 is kicking or not at the given time step
        # Defined as continuous, but treated as mixed-integer: 1 when kicking
        v1_kick = prog.NewContinuousVariables(
            N + 1, 2, name='v1_kick')  # velocity of player after kick to puck
        vp_kick = prog.NewContinuousVariables(
            N + 1, 2, name='vp_kick'
        )  # velocity of puck after being kicked by the player
        dist = prog.NewContinuousVariables(
            N + 1, name='dist_puck_player')  # distance between puck and player
        cost = prog.NewContinuousVariables(
            N + 1, name='cost')  # slack variable to monitor cost

        # Compute distance between puck and player as slack variable.
        for k in range(N + 1):
            r1 = self.sim_params.player_radius
            rp = self.sim_params.puck_radius
            prog.AddConstraint(
                dist[k] == (x1[k, 0:2] - xp[k, 0:2]).dot(x1[k, 0:2] -
                                                         xp[k, 0:2]) -
                (r1 + rp)**2)

        #### COST and final states
        # TODO: find adequate final velocity
        x_puck_des = np.concatenate(
            (p_goal, np.zeros(2)), axis=0)  # desired position and vel for puck
        for k in range(N + 1):
            Q_dist_puck_goal = 10 * np.eye(2)
            q_dist_puck_player = 0.1
            e1 = x_puck_des[0:2] - xp[k, 0:2]
            e2 = xp[k, 0:2] - x1[k, 0:2]
            prog.AddConstraint(
                cost[k] == e1.dot(np.matmul(Q_dist_puck_goal, e1)) +
                q_dist_puck_player * dist[k])
            prog.AddCost(cost[k])
            #prog.AddQuadraticErrorCost(Q=self.mpc_params.Q_puck, x_desired=x_puck_des, vars=xp[k])  # puck in the goal
            #prog.AddQuadraticErrorCost(Q=np.eye(2), x_desired=x_puck_des[0:2], vars=x1[k, 0:2])
            #prog.AddQuadraticErrorCost(Q=10*np.eye(2), x_desired=np.array([2, 2]), vars=x1[k, 0:2]) # TEST: control position of the player instead of puck
        #for k in range(N):
        #    prog.AddQuadraticCost(1e-2*u1[k].dot(u1[k]))                 # be wise on control effort

        # Initial states for puck and player
        prog.AddBoundingBoxConstraint(x_p1, x_p1,
                                      x1[0])  # Intial state for player 1
        prog.AddBoundingBoxConstraint(x_puck, x_puck,
                                      xp[0])  # Initial state for puck

        # Compute elastic collision for every possible timestep.
        for k in range(N + 1):
            v_puck_bfr = xp[k, 2:4]
            v_player_bfr = x1[k, 2:4]
            v_puck_aft, v_player_aft = self.get_reset_velocities(
                v_puck_bfr, v_player_bfr)
            prog.AddConstraint(eq(vp_kick[k], v_puck_aft))
            prog.AddConstraint(eq(v1_kick[k], v_player_aft))

        # Use slack variable to activate guard condition based on distance.
        M = 15**2
        for k in range(N + 1):
            prog.AddLinearConstraint(dist[k] <= M * (1 - t1_kick[k]))
            prog.AddLinearConstraint(dist[k] >= -t1_kick[k] * M)

        # Hybrid dynamics for player
        #for k in range(N):
        #    A = self.mpc_params.A_player
        #    B = self.mpc_params.B_player
        #    x1_kick = np.concatenate((x1[k][0:2], v1_kick[k]), axis=0) # The state of the player after it kicks the puck
        #    x1_next = np.matmul(A, (1 - t1_kick[k])*x1[k] + t1_kick[k]*x1_kick) + np.matmul(B, u1[k])
        #    prog.AddConstraint(eq(x1[k+1], x1_next))

        # Assuming player dynamics are not affected by collision
        for k in range(N):
            A = self.mpc_params.A_player
            B = self.mpc_params.B_player
            x1_next = np.matmul(A, x1[k]) + np.matmul(B, u1[k])
            prog.AddConstraint(eq(x1[k + 1], x1_next))

        # Hybrid dynamics for puck_mass
        for k in range(N):
            A = self.mpc_params.A_puck
            xp_kick = np.concatenate(
                (xp[k][0:2], vp_kick[k]),
                axis=0)  # State of the puck after it gets kicked
            xp_next = np.matmul(A, (1 - t1_kick[k]) * xp[k] +
                                t1_kick[k] * xp_kick)
            prog.AddConstraint(eq(xp[k + 1], xp_next))

        # Keep the player trajectory out of collision with the puck (up to a small eps tolerance)
        for k in range(N + 1):
            eps = 0.1
            prog.AddConstraint(dist[k] >= -eps)

        # Input and arena constraint
        self.add_input_limits(prog, u1, N)
        self.add_arena_limits(prog, x1, N)
        self.add_arena_limits(prog, xp, N)

        # fake mixed-integer constraint
        #for k in range(N+1):
        #    prog.AddConstraint(t1_kick[k]*(1-t1_kick[k])==0)

        # Hot-start
        guess_u1, guess_x1 = self.get_initial_guess(x_p1, p_goal, x_puck[0:2])
        prog.SetInitialGuess(x1, guess_x1)
        prog.SetInitialGuess(u1, guess_u1)
        if self.prev_xp is not None:
            prog.SetInitialGuess(xp, self.prev_xp)
            #prog.SetInitialGuess(t1_kick, np.ones_like(t1_kick))

        # Solve the program (currently disabled; the initial guess is returned instead)
        # solver = SnoptSolver()
        #result = solver.Solve(prog)

        #if not result.is_success():
        #    print("Unable to find solution.")

        # save for hot-start
        #self.prev_xp = result.GetSolution(xp)

        #if True:
        #    print("x1:{}".format(result.GetSolution(x1)))
        #    print("u1: {}".format( result.GetSolution(u1)))
        #    print("xp: {}".format( result.GetSolution(xp)))
        #   print('dist{}'.format(result.GetSolution(dist)))
        #    print("t1_kick:{}".format(result.GetSolution(t1_kick)))
        #    print("v1_kick:{}".format(result.GetSolution(v1_kick)))
        #   print("vp_kick:{}".format(result.GetSolution(vp_kick)))
        #    print("cost:{}".format(result.GetSolution(cost)))

        # Return a success flag, the first input from the initial guess, and a zero-velocity placeholder
        #u1_opt = result.GetSolution(u1)
        return True, guess_u1[0, :], np.zeros(2)
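The kick indicator `t1_kick` above is declared continuous and is only pushed toward {0, 1} by the commented-out complementarity trick. For reference, a minimal standalone sketch of the same big-M guard written with true binary variables (illustrative names; a mixed-integer-capable solver such as Gurobi would be required):

from pydrake.all import MathematicalProgram

prog = MathematicalProgram()
N = 10
M = 15**2  # big-M upper bound on the distance slack
dist = prog.NewContinuousVariables(N + 1, 'dist')
kick = prog.NewBinaryVariables(N + 1, 'kick')  # 1 when the player kicks the puck
for k in range(N + 1):
    # kick[k] = 1 forces dist[k] <= 0; kick[k] = 0 forces dist[k] >= 0
    prog.AddLinearConstraint(dist[k] <= M * (1 - kick[k]))
    prog.AddLinearConstraint(dist[k] >= -M * kick[k])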
Ejemplo n.º 19
0
    def _DoCalcDiscreteVariableUpdates(self, context, events, discrete_state):
        # Call base method to ensure we do not get recursion.
        # (This makes sure relevant event handlers get called.)
        LeafSystem._DoCalcDiscreteVariableUpdates(self, context, events,
                                                  discrete_state)

        new_control_input = discrete_state. \
            get_mutable_vector().get_mutable_value()
        x = self.EvalVectorInput(
            context, self.robot_state_input_port.get_index()).get_value()
        setpoint = self.EvalAbstractInput(
            context, self.setpoint_input_port.get_index()).get_value()

        q_full = x[:self.nq]
        v_full = x[self.nq:]
        q = x[self.controlled_inds]
        v = x[self.nq:][self.controlled_inds]

        kinsol = self.rbt.doKinematics(q_full, v_full)

        M_full = self.rbt.massMatrix(kinsol)
        C = self.rbt.dynamicsBiasTerm(kinsol, {}, None)[self.controlled_inds]
        # Python slicing doesn't work in 2D, so do it row-by-row
        M = np.zeros((self.nq_reduced, self.nq_reduced))
        for k in range(self.nq_reduced):
            M[:, k] = M_full[self.controlled_inds, k]

        # Pick a qdd that results in minimum deviation from the desired
        # end effector pose (according to the end effector frame's jacobian
        # at the current state)
        # v_next = v + control_period * qdd
        # q_next = q + control_period * (v + v_next) / 2.
        # ee_v = J*v
        # ee_p = from forward kinematics
        # ee_v_next = J*v_next
        # ee_p_next = ee_p + control_period * (ee_v + ee_v_next) / 2.
        # min over u and qdd of
        #        || q_next - q_des ||_Kq
        #     +  || v_next - v_des ||_Kv
        #     +  || qdd ||_Ka
        #     +  || ee_p_next - ee_pt_des ||_Kee_pt
        #     +  || ee_v_next - ee_v_des ||_Kee_v
        #     +  the messily-implemented angular ones?
        # s.t. M qdd + C = B u
        # (no contact modeling for now)
        prog = MathematicalProgram()
        qdd = prog.NewContinuousVariables(self.nq_reduced, "qdd")
        u = prog.NewContinuousVariables(self.nu, "u")

        prog.AddQuadraticCost(qdd.T.dot(setpoint.Ka).dot(qdd))

        v_next = v + self.control_period * qdd
        q_next = q + self.control_period * (v + v_next) / 2.
        if setpoint.v_des is not None:
            v_err = setpoint.v_des - v_next
            prog.AddQuadraticCost(v_err.T.dot(setpoint.Kv).dot(v_err))
        if setpoint.q_des is not None:
            q_err = setpoint.q_des - q_next
            prog.AddQuadraticCost(q_err.T.dot(setpoint.Kq).dot(q_err))

        if setpoint.ee_frame is not None and setpoint.ee_pt is not None:
            # Convert x to autodiff for Jacobians computation
            q_full_ad = np.empty(self.nq, dtype=AutoDiffXd)
            for i in range(self.nq):
                der = np.zeros(self.nq)
                der[i] = 1
                q_full_ad.flat[i] = AutoDiffXd(q_full.flat[i], der)
            kinsol_ad = self.rbt.doKinematics(q_full_ad)

            tf_ad = self.rbt.relativeTransform(kinsol_ad, 0, setpoint.ee_frame)

            # Compute errors in EE pt position and velocity (in world frame)
            ee_p_ad = tf_ad[0:3, 0:3].dot(setpoint.ee_pt) + tf_ad[0:3, 3]
            ee_p = np.hstack([y.value() for y in ee_p_ad])

            J_ee = np.vstack([y.derivatives()
                              for y in ee_p_ad]).reshape(3, self.nq)
            J_ee = J_ee[:, self.controlled_inds]

            ee_v = J_ee.dot(v)
            ee_v_next = J_ee.dot(v_next)
            ee_p_next = ee_p + self.control_period * (ee_v + ee_v_next) / 2.

            if setpoint.ee_pt_des is not None:
                ee_p_err = setpoint.ee_pt_des.reshape(
                    (3, 1)) - ee_p_next.reshape((3, 1))
                prog.AddQuadraticCost(
                    (ee_p_err.T.dot(setpoint.Kee_pt).dot(ee_p_err))[0, 0])
            if setpoint.ee_v_des is not None:
                ee_v_err = setpoint.ee_v_des.reshape(
                    (3, 1)) - ee_v_next.reshape((3, 1))
                prog.AddQuadraticCost(
                    (ee_v_err.T.dot(setpoint.Kee_v).dot(ee_v_err))[0, 0])

            # Also compute errors in EE cardinal vector directions vs targets in world frame
            for i, vec in enumerate(
                (setpoint.ee_x_des, setpoint.ee_y_des, setpoint.ee_z_des)):
                if vec is not None:
                    direction = np.zeros(3)
                    direction[i] = 1.
                    ee_dir_ad = tf_ad[0:3, 0:3].dot(direction)
                    ee_dir_p = np.hstack([y.value() for y in ee_dir_ad])
                    J_ee_dir = np.vstack([y.derivatives() for y in ee_dir_ad
                                          ]).reshape(3, self.nq)
                    J_ee_dir = J_ee_dir[:, self.controlled_inds]
                    ee_dir_v = J_ee_dir.dot(v)
                    ee_dir_v_next = J_ee_dir.dot(v_next)
                    ee_dir_p_next = ee_dir_p + self.control_period * (
                        ee_dir_v + ee_dir_v_next) / 2.
                    ee_dir_p_err = vec.reshape((3, 1)) - ee_dir_p_next.reshape(
                        (3, 1))
                    prog.AddQuadraticCost((ee_dir_p_err.T.dot(
                        setpoint.Kee_xyz).dot(ee_dir_p_err))[0, 0])
                    prog.AddQuadraticCost((ee_dir_v_next.T.dot(
                        setpoint.Kee_xyzd).dot(ee_dir_v_next)))

        LHS = np.dot(M, qdd) + C
        RHS = np.dot(self.B, u)
        for i in range(self.nq_reduced):
            prog.AddLinearConstraint(LHS[i] == RHS[i])

        result = prog.Solve()
        u = prog.GetSolution(u)
        new_control_input[:] = u
from pydrake.all import MathematicalProgram, Solve, Polynomial, Variables

prog = MathematicalProgram()
x = prog.NewIndeterminates(2, "x")
f = [-x[0] - 2 * x[1]**2, -x[1] - x[0] * x[1] - 2 * x[1]**3]

V = prog.NewSosPolynomial(Variables(x), 2)[0].ToExpression()
prog.AddLinearConstraint(V.Substitute({x[0]: 0, x[1]: 0}) == 0)
prog.AddLinearConstraint(V.Substitute({x[0]: 1, x[1]: 0}) == 1)
Vdot = V.Jacobian(x).dot(f)

prog.AddSosConstraint(-Vdot)

result = Solve(prog)
assert result.is_success()

print("V = " + str(
    Polynomial(result.GetSolution(V)).RemoveTermsWithSmallCoefficients(1e-5)))
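As a quick sanity check on the certificate, the recovered V and its Lie derivative can be evaluated at a few sample states. A sketch, assuming the prog, x, f, V, and result defined just above:

# Evaluate the solved Lyapunov candidate and its derivative at sample points.
V_sol = result.GetSolution(V)
Vdot_sol = V_sol.Jacobian(x).dot(f)
for sample in ([0.5, -0.2], [-1.0, 1.0]):
    env = {x[0]: sample[0], x[1]: sample[1]}
    print(sample, V_sol.Evaluate(env), Vdot_sol.Evaluate(env))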
Ejemplo n.º 21
0
    def MILP_compute_traj(self,
                          obst_idx,
                          x_out,
                          y_out,
                          dx,
                          dy,
                          pose_initial=[0., 0.]):
        '''
        Find trajectory with MILP.
        Outputs trajectory (waypoints) and new K for control.
        '''
        mp = MathematicalProgram()
        N = 8
        k = 0
        # define state traj
        st = mp.NewContinuousVariables(2, "state_%d" % k)
        # binary variables for obstacle constraints
        c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
        obs = c
        states = st
        for k in range(1, N):
            st = mp.NewContinuousVariables(2, "state_%d" % k)
            states = np.vstack((states, st))
            c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
            obs = np.vstack((obs, c))
        st = mp.NewContinuousVariables(2, "state_%d" % (N + 1))
        states = np.vstack((states, st))
        c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
        obs = np.vstack((obs, c))
        # variables encoding max x y dist from obstacle
        x_margin = mp.NewContinuousVariables(1, "x_margin")
        y_margin = mp.NewContinuousVariables(1, "y_margin")
        ### define cost
        for i in range(N):
            mp.AddLinearCost(-states[i, 0])  # go as far forward as possible
        mp.AddLinearCost(-states[-1, 0])
        mp.AddLinearCost(-x_margin[0])
        mp.AddLinearCost(-y_margin[0])
        # bound x y margin so it doesn't explode
        mp.AddLinearConstraint(x_margin[0] <= 3.)
        mp.AddLinearConstraint(y_margin[0] <= 3.)
        # x and y margins are non-negative and at least the robot radius
        mp.AddLinearConstraint(x_margin[0] >= 0.05)
        mp.AddLinearConstraint(y_margin[0] >= 0.05)
        M = 1000  # big-M constant for the integer obstacle constraints
        # state constraint
        for i in range(2):  # initial state constraint x y
            mp.AddLinearConstraint(states[0, i] <= pose_initial[i])
            mp.AddLinearConstraint(states[0, i] >= pose_initial[i])
        for i in range(N):
            mp.AddQuadraticCost((states[i + 1, 1] - states[i, 1])**2)
            mp.AddLinearConstraint(states[i + 1, 0] <= states[i, 0] + dx)
            mp.AddLinearConstraint(states[i + 1, 0] >= states[i, 0] - dx)
            mp.AddLinearConstraint(states[i + 1, 1] <= states[i, 1] + dy)
            mp.AddLinearConstraint(states[i + 1, 1] >= states[i, 1] - dy)
            # obstacle constraint
            for j in range(self.ang_discret - 1):
                mp.AddLinearConstraint(sum(obs[i, 4 * j:4 * j + 4]) <= 3)  # at least one of the four disjuncts must hold
                ang_min = self.angles[j]  # lower angle bound of obstacle
                ang_max = self.angles[j + 1]  # upper angle bound of obstacle
                if int(obst_idx[j]) < (self.rng_discret -
                                       1):  # less than max range measured
                    rng_min = self.ranges[int(
                        obst_idx[j])]  # range at which the obstacle starts at this angle
                    rng_max = self.ranges[int(obst_idx[j] + 1)]
                    mp.AddLinearConstraint(
                        states[i, 0] <= rng_min - x_margin[0] +
                        M * obs[i, 4 * j])  # xi <= xobs,low + M*c
                    mp.AddLinearConstraint(
                        states[i, 0] >= rng_max + x_margin[0] -
                        M * obs[i, 4 * j + 1])  # xi >= xobs,high - M*c
                    mp.AddLinearConstraint(
                        states[i, 1] <=
                        states[i, 0] * np.tan(ang_min) - y_margin[0] +
                        M * obs[i, 4 * j + 2])  # yi <= xi*tan(ang,min) + M*c
                    mp.AddLinearConstraint(
                        states[i, 1] >=
                        states[i, 0] * np.tan(ang_max) + y_margin[0] -
                        M * obs[i, 4 * j + 3])  # yi >= xi*tan(ang,max) - M*c
        # obstacle constraint for last state
        for j in range(self.ang_discret - 1):
            mp.AddLinearConstraint(sum(obs[N, 4 * j:4 * j + 4]) <= 3)  # at least one of the four disjuncts must hold
            ang_min = self.angles[j]  # lower angle bound of obstacle
            ang_max = self.angles[j + 1]  # upper angle bound of obstacle
            if int(obst_idx[j]) < (self.rng_discret -
                                   1):  # less than max range measured
                rng_min = self.ranges[int(
                    obst_idx[j])]  # range at which the obstacle starts at this angle
                rng_max = self.ranges[int(obst_idx[j] + 1)]
                mp.AddLinearConstraint(
                    states[N, 0] <= rng_min - x_margin[0] +
                    M * obs[N, 4 * j])  # xi <= xobs,low + M*c
                mp.AddLinearConstraint(
                    states[N, 0] >= rng_max + x_margin[0] -
                    M * obs[N, 4 * j + 1])  # xi >= xobs,high - M*c
                mp.AddLinearConstraint(
                    states[N,
                           1] <= states[N, 0] * np.tan(ang_min) - y_margin[0] +
                    M * obs[N, 4 * j + 2])  # yi <= xi*tan(ang,min) + M*c
                mp.AddLinearConstraint(
                    states[N,
                           1] >= states[N, 0] * np.tan(ang_max) + y_margin[0] -
                    M * obs[N, 4 * j + 3])  # yi >= xi*tan(ang,max) - M*c

        mp.Solve()

        trajectory = mp.GetSolution(states)
        xm = mp.GetSolution(x_margin)
        ym = mp.GetSolution(y_margin)
        x_out[:] = trajectory[:, 0]
        y_out[:] = trajectory[:, 1]
        return trajectory, xm[0], ym[0]
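The obstacle constraints above use the standard big-M disjunction: each binary in c relaxes one "stay outside this face" inequality, and the sum(...) <= 3 constraint forces at least one face to remain active. A self-contained sketch of the same pattern for a single axis-aligned box obstacle (all names and bounds here are illustrative, not taken from the class above):

from pydrake.all import MathematicalProgram

mp = MathematicalProgram()
M = 1000.  # big-M constant
x = mp.NewContinuousVariables(2, 'x')  # a single 2D waypoint
c = mp.NewBinaryVariables(4, 'c')      # one binary per face of the box
x_lo, x_hi, y_lo, y_hi = 1., 2., -0.5, 0.5  # illustrative box bounds
# Stay outside the box: at most 3 of the 4 binaries may relax their face,
# so at least one face constraint must remain active.
mp.AddLinearConstraint(x[0] <= x_lo + M * c[0])
mp.AddLinearConstraint(x[0] >= x_hi - M * c[1])
mp.AddLinearConstraint(x[1] <= y_lo + M * c[2])
mp.AddLinearConstraint(x[1] >= y_hi - M * c[3])
mp.AddLinearConstraint(sum(c) <= 3)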
Ejemplo n.º 22
0
def solveOptimization(state_init,
                      t_impact,
                      impact_combination,
                      T,
                      u_guess=None,
                      x_guess=None,
                      h_guess=None):
    prog = MathematicalProgram()
    h = prog.NewContinuousVariables(T, name='h')
    u = prog.NewContinuousVariables(rows=T + 1,
                                    cols=2 * n_quadrotors,
                                    name='u')
    x = prog.NewContinuousVariables(rows=T + 1,
                                    cols=6 * n_quadrotors + 4 * n_balls,
                                    name='x')
    dv = prog.decision_variables()

    prog.AddBoundingBoxConstraint([h_min] * T, [h_max] * T, h)

    for i in range(n_quadrotors):
        sys = Quadrotor2D()
        context = sys.CreateDefaultContext()
        dir_coll_constr = DirectCollocationConstraint(sys, context)
        ind_x = 6 * i
        ind_u = 2 * i

        for t in range(T):
            impact_indices = impact_combination[np.argmax(
                np.abs(t - t_impact) <= 1)]
            quad_ind, ball_ind = impact_indices[0], impact_indices[1]

            if quad_ind == i and np.any(
                    t == t_impact
            ):  # Don't add Direct collocation constraint at impact
                continue
            elif quad_ind == i and (np.any(t == t_impact - 1)
                                    or np.any(t == t_impact + 1)):
                prog.AddConstraint(
                    eq(
                        x[t + 1,
                          ind_x:ind_x + 3], x[t, ind_x:ind_x + 3] + h[t] *
                        x[t + 1, ind_x + 3:ind_x + 6]))  # Backward euler
                prog.AddConstraint(
                    eq(x[t + 1, ind_x + 3:ind_x + 6], x[t,
                                                        ind_x + 3:ind_x + 6])
                )  # Zero-acceleration assumption during this time step. Should maybe replace with something less naive
            else:
                AddDirectCollocationConstraint(
                    dir_coll_constr, np.array([[h[t]]]),
                    x[t, ind_x:ind_x + 6].reshape(-1, 1),
                    x[t + 1, ind_x:ind_x + 6].reshape(-1, 1),
                    u[t, ind_u:ind_u + 2].reshape(-1, 1),
                    u[t + 1, ind_u:ind_u + 2].reshape(-1, 1), prog)

    for i in range(n_balls):
        sys = Ball2D()
        context = sys.CreateDefaultContext()
        dir_coll_constr = DirectCollocationConstraint(sys, context)
        ind_x = 6 * n_quadrotors + 4 * i

        for t in range(T):
            impact_indices = impact_combination[np.argmax(
                np.abs(t - t_impact) <= 1)]
            quad_ind, ball_ind = impact_indices[0], impact_indices[1]

            if ball_ind == i and np.any(
                    t == t_impact
            ):  # Don't add Direct collocation constraint at impact
                continue
            elif ball_ind == i and (np.any(t == t_impact - 1)
                                    or np.any(t == t_impact + 1)):
                prog.AddConstraint(
                    eq(
                        x[t + 1,
                          ind_x:ind_x + 2], x[t, ind_x:ind_x + 2] + h[t] *
                        x[t + 1, ind_x + 2:ind_x + 4]))  # Backward euler
                prog.AddConstraint(
                    eq(x[t + 1,
                         ind_x + 2:ind_x + 4], x[t, ind_x + 2:ind_x + 4] +
                       h[t] * np.array([0, -9.81])))
            else:
                AddDirectCollocationConstraint(
                    dir_coll_constr, np.array([[h[t]]]),
                    x[t, ind_x:ind_x + 4].reshape(-1, 1),
                    x[t + 1, ind_x:ind_x + 4].reshape(-1, 1),
                    u[t, 0:0].reshape(-1, 1), u[t + 1, 0:0].reshape(-1,
                                                                    1), prog)

    # Initial conditions
    prog.AddLinearConstraint(eq(x[0, :], state_init))

    # Final conditions
    prog.AddLinearConstraint(eq(x[T, 0:14], state_final[0:14]))
    # Quadrotor final conditions (full state)
    for i in range(n_quadrotors):
        ind = 6 * i
        prog.AddLinearConstraint(
            eq(x[T, ind:ind + 6], state_final[ind:ind + 6]))

    # Ball final conditions (position only)
    for i in range(n_balls):
        ind = 6 * n_quadrotors + 4 * i
        prog.AddLinearConstraint(
            eq(x[T, ind:ind + 2], state_final[ind:ind + 2]))

    # Input constraints
    for i in range(n_quadrotors):
        prog.AddLinearConstraint(ge(u[:, 2 * i], -20.0))
        prog.AddLinearConstraint(le(u[:, 2 * i], 20.0))
        prog.AddLinearConstraint(ge(u[:, 2 * i + 1], -20.0))
        prog.AddLinearConstraint(le(u[:, 2 * i + 1], 20.0))

    # Don't allow quadrotor to pitch more than 60 degrees
    for i in range(n_quadrotors):
        prog.AddLinearConstraint(ge(x[:, 6 * i + 2], -np.pi / 3))
        prog.AddLinearConstraint(le(x[:, 6 * i + 2], np.pi / 3))

    # Ball position constraints
    # for i in range(n_balls):
    #     ind_i = 6*n_quadrotors + 4*i
    #     prog.AddLinearConstraint(ge(x[:,ind_i],-2.0))
    #     prog.AddLinearConstraint(le(x[:,ind_i], 2.0))
    #     prog.AddLinearConstraint(ge(x[:,ind_i+1],-3.0))
    #     prog.AddLinearConstraint(le(x[:,ind_i+1], 3.0))

    # Impact constraint
    quad_temp = Quadrotor2D()

    for i in range(n_quadrotors):
        for j in range(n_balls):
            ind_q = 6 * i
            ind_b = 6 * n_quadrotors + 4 * j
            for t in range(T):
                if np.any(
                        t == t_impact
                ):  # If quad i and ball j impact at time t, add impact constraint
                    impact_indices = impact_combination[np.argmax(
                        t == t_impact)]
                    quad_ind, ball_ind = impact_indices[0], impact_indices[1]
                    if quad_ind == i and ball_ind == j:
                        # At impact, witness function == 0
                        prog.AddConstraint(lambda a: np.array([
                            CalcClosestDistanceQuadBall(a[0:3], a[3:5])
                        ]).reshape(1, 1),
                                           lb=np.zeros((1, 1)),
                                           ub=np.zeros((1, 1)),
                                           vars=np.concatenate(
                                               (x[t, ind_q:ind_q + 3],
                                                x[t,
                                                  ind_b:ind_b + 2])).reshape(
                                                      -1, 1))
                        # At impact, enforce discrete collision update for both ball and quadrotor
                        prog.AddConstraint(
                            CalcPostCollisionStateQuadBallResidual,
                            lb=np.zeros((6, 1)),
                            ub=np.zeros((6, 1)),
                            vars=np.concatenate(
                                (x[t, ind_q:ind_q + 6], x[t, ind_b:ind_b + 4],
                                 x[t + 1, ind_q:ind_q + 6])).reshape(-1, 1))
                        prog.AddConstraint(
                            CalcPostCollisionStateBallQuadResidual,
                            lb=np.zeros((4, 1)),
                            ub=np.zeros((4, 1)),
                            vars=np.concatenate(
                                (x[t, ind_q:ind_q + 6], x[t, ind_b:ind_b + 4],
                                 x[t + 1, ind_b:ind_b + 4])).reshape(-1, 1))

                        # rough constraints to enforce hitting center-ish of paddle
                        prog.AddLinearConstraint(
                            x[t, ind_q] - x[t, ind_b] >= -0.01)
                        prog.AddLinearConstraint(
                            x[t, ind_q] - x[t, ind_b] <= 0.01)
                        continue
                # Everywhere else, witness function must be > 0
                prog.AddConstraint(lambda a: np.array([
                    CalcClosestDistanceQuadBall(a[ind_q:ind_q + 3], a[
                        ind_b:ind_b + 2])
                ]).reshape(1, 1),
                                   lb=np.zeros((1, 1)),
                                   ub=np.inf * np.ones((1, 1)),
                                   vars=x[t, :].reshape(-1, 1))

    # Don't allow quadrotor collisions
    # for t in range(T):
    #     for i in range(n_quadrotors):
    #         for j in range(i+1, n_quadrotors):
    #             prog.AddConstraint((x[t,6*i]-x[t,6*j])**2 + (x[t,6*i+1]-x[t,6*j+1])**2 >= 0.65**2)

    # Quadrotors stay on their own side
    # prog.AddLinearConstraint(ge(x[:, 0], 0.3))
    # prog.AddLinearConstraint(le(x[:, 6], -0.3))

    ###############################################################################
    # Set up initial guesses
    initial_guess = np.empty(prog.num_vars())

    # # initial guess for the time step
    prog.SetDecisionVariableValueInVector(h, h_guess, initial_guess)

    x_guess[0, :] = state_init  # pin the state guess to the initial state
    prog.SetDecisionVariableValueInVector(x, x_guess, initial_guess)

    prog.SetDecisionVariableValueInVector(u, u_guess, initial_guess)

    solver = SnoptSolver()
    print("Solving...")
    result = solver.Solve(prog, initial_guess)

    # print(GetInfeasibleConstraints(prog,result))
    # be sure that the solution is optimal
    assert result.is_success()

    print(f'Solution found? {result.is_success()}.')

    #################################################################################
    # Extract results
    # get optimal solution
    h_opt = result.GetSolution(h)
    x_opt = result.GetSolution(x)
    u_opt = result.GetSolution(u)
    time_breaks_opt = np.array([sum(h_opt[:t]) for t in range(T + 1)])
    u_opt_poly = PiecewisePolynomial.ZeroOrderHold(time_breaks_opt, u_opt.T)
    # x_opt_poly = PiecewisePolynomial.Cubic(time_breaks_opt, x_opt.T, False)
    x_opt_poly = PiecewisePolynomial.FirstOrderHold(
        time_breaks_opt, x_opt.T
    )  # Switch to first order hold instead of cubic because cubic was taking too long to create
    #################################################################################
    # Create list of K matrices for time varying LQR
    context = quad_plant.CreateDefaultContext()
    breaks = copy.copy(
        time_breaks_opt)  #np.linspace(0, x_opt_poly.end_time(), 100)

    K_samples = np.zeros((breaks.size, 12 * n_quadrotors))

    for i in range(n_quadrotors):
        K = None
        for j in range(breaks.size):
            context.SetContinuousState(
                x_opt_poly.value(breaks[j])[6 * i:6 * (i + 1)])
            context.FixInputPort(
                0,
                u_opt_poly.value(breaks[j])[2 * i:2 * (i + 1)])
            linear_system = FirstOrderTaylorApproximation(quad_plant, context)
            A = linear_system.A()
            B = linear_system.B()
            try:
                K, _, _ = control.lqr(A, B, Q, R)
            except:
                assert K is not None, "Failed to calculate initial K for quadrotor " + str(
                    i)
                print("Warning: Failed to calculate K at timestep", j,
                      "for quadrotor", i, ". Using K from previous timestep")

            K_samples[j, 12 * i:12 * (i + 1)] = K.reshape(-1)

    K_poly = PiecewisePolynomial.ZeroOrderHold(breaks, K_samples.T)

    return u_opt_poly, x_opt_poly, K_poly, h_opt
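The returned polynomials are intended for a time-varying LQR-style tracking controller. A hedged sketch of how they might be consumed at runtime for quadrotor i (the function name, t_now, and x_now are illustrative, not part of the code above):

def tracking_input(u_opt_poly, x_opt_poly, K_poly, i, t_now, x_now):
    # Nominal input and state for quadrotor i at time t_now.
    u_nom = u_opt_poly.value(t_now)[2 * i:2 * (i + 1)].flatten()
    x_nom = x_opt_poly.value(t_now)[6 * i:6 * (i + 1)].flatten()
    # Gain matrix stored row-major as a length-12 slice of K_samples.
    K = K_poly.value(t_now)[12 * i:12 * (i + 1)].reshape(2, 6)
    return u_nom - K.dot(x_now - x_nom)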
Ejemplo n.º 23
0
    prog.AddConstraint(eq(q[t+1], q[t] + h[t] * qd[t+1]))
    prog.AddConstraint(eq(qd[t+1], qd[t] + h[t] * qdd[t]))

# manipulator equations for all t (implicit Euler)
for t in range(T):
    vars = np.concatenate((q[t+1], qd[t+1], qdd[t], f[t]))
    prog.AddConstraint(manipulator_equations, lb=[0]*nq, ub=[0]*nq, vars=vars)
    
# velocity reset across heel strike
# see http://underactuated.mit.edu/multibody.html#impulsive_collision
vars = np.concatenate((q[-1], qd[-1], qd_post, imp))
prog.AddConstraint(reset_velocity_heelstrike, lb=[0]*(nq+nf), ub=[0]*(nq+nf), vars=vars)
    
# mirror initial and final configuration
# see "The Walking Cycle" section of this notebook
prog.AddLinearConstraint(eq(q[0], - q[-1]))

# mirror constraint between initial and final velocity
# see "The Walking Cycle" section of this notebook
prog.AddLinearConstraint(qd[0, 0] == 0)
prog.AddLinearConstraint(qd[0, 1] == 0)
prog.AddLinearConstraint(qd[0, 2] == qd_post[2] + qd_post[3])
prog.AddLinearConstraint(qd[0, 3] == - qd_post[3])

"""Now it is your turn to complete the optimization problem.
You need to add five groups of constraints:
1. **Stance foot on the ground for all times.**
This `LinearConstraint` must ensure that $x(t) = y(t) = 0$ for all $t$.
2. **Swing foot on the ground at time zero.**
This constraint must ensure that the initial configuration $\mathbf{q}(0)$ is such that the swing foot is on the ground.
For a more complex robot, this would be a tough nonlinear constraint.
constraint1 = prog.AddSosConstraint(V - eps*(x-x0).dot(x-x0))

# Construct the polynomial which is the time derivative of V.
Vdot = V.Jacobian(x).dot(f)

# Construct a polynomial L representing the "Lagrange multiplier".
deg_L = 2
L = prog.NewFreePolynomial(Variables(x), deg_L).ToExpression()

# Add a constraint that Vdot is strictly negative away from x0 (but make an
# exception for the upright fixed point by multiplying by s^2).
constraint2 = prog.AddSosConstraint(-Vdot - L*(s**2+c**2-1) -
                                    eps*(x-x0).dot(x-x0)*s**2)

# Add V(0) = 0 constraint
constraint3 = prog.AddLinearConstraint(
    V.Substitute({s: 0, c: 1, thetadot: 0}) == 0)

# Add V(theta=pi) = mgl, just to set the scale.
constraint4 = prog.AddLinearConstraint(
    V.Substitute({s: 1, c: 0, thetadot: 0}) == p.mass()*p.gravity()*p.length())

# Call the solver.
result = Solve(prog)
assert(result.is_success())

# Note that I've added mgl to the potential energy (relative to the textbook),
# so that it would be non-negative... like the Lyapunov function.
mgl = p.mass()*p.gravity()*p.length()
print('Mechanical Energy = ')
print(.5*p.mass()*p.length()**2*thetadot**2 + mgl*(1-c))
Ejemplo n.º 25
0
    ], q[k]).evaluator().set_description(f"q[{k}] unit quaternion constraint"))
    # Impose unit length constraint on the rotation axis.
    (prog.AddConstraint(lambda x: [x @ x], [1], [1],
                        w_axis[k]).evaluator().set_description(
                            f"w_axis[{k}] unit axis constraint"))
for k in range(1, N):
    (prog.AddConstraint(
        lambda q_qprev_v_dt: backward_euler(q_qprev_v_dt),
        lb=[0.0] * 4,
        ub=[0.0] * 4,
        vars=np.concatenate([
            q[k], q[k - 1], w_axis[k], w_mag[k], dt[k]
        ])).evaluator().set_description(f"q[{k}] backward euler constraint"))

(prog.AddLinearConstraint(eq(q[0], np.array(
    [1.0, 0.0, 0.0,
     0.0]))).evaluator().set_description("Initial orientation constraint"))
(prog.AddLinearConstraint(
    eq(
        q[-1],
        np.array([
            -0.2955511242573139, 0.25532186031279896, 0.5106437206255979,
            0.7659655809383968
        ]))).evaluator().set_description("Final orientation constraint"))
(prog.AddLinearConstraint(
    ge(dt,
       0.0)).evaluator().set_description("Timestep greater than 0 constraint"))
(prog.AddConstraint(
    np.sum(dt) == 1.0).evaluator().set_description("Total time constriant"))

for k in range(N):
Ejemplo n.º 26
0
    def compute_trajectory_to_other_world(self, state_initial, minimum_time,
                                          maximum_time):
        '''
        Your mission is to implement this function.

        A successful implementation of this function will compute a dynamically feasible trajectory
        which satisfies these criteria:
            - Efficiently conserve fuel
            - Reach "orbit" of the far right world
            - Approximately obey the dynamic constraints
            - Begin at the state_initial provided
            - Take no more than maximum_time, no less than minimum_time

        The above are defined more precisely in the provided notebook.

        Please note there are three return args.

        :param state_initial: numpy array of length 4, see rocket_dynamics for documentation
        :param: minimum_time: float, minimum time allowed for trajectory
        :param: maximum_time: float, maximum time allowed for trajectory

        :return: three return args separated by commas:

            trajectory, input_trajectory, time_array

            trajectory: a 2d array with N rows, and 4 columns. See simulate_states_over_time for more documentation.
            input_trajectory: a 2d array with N-1 row, and 2 columns. See simulate_states_over_time for more documentation.
            time_array: an array with N rows. 

        '''

        mp = MathematicalProgram()

        max_speed = 0.99
        desired_distance = 0.5
        u_cost_factor = 1000.
        N = 50
        #         trajectory = np.zeros((N+1,4))
        #         input_trajectory = np.ones((N,2))*10.0
        time_used = (maximum_time + minimum_time) / 2.
        time_step = time_used / (N + 1)
        time_array = np.arange(0.0, time_used, time_step)

        k = 0
        # Create continuous variables for u & x
        u = mp.NewContinuousVariables(2, "u_%d" % k)
        x = mp.NewContinuousVariables(4, "x_%d" % k)
        u_over_time = u
        x_over_time = x

        for k in range(1, N):
            u = mp.NewContinuousVariables(2, "u_%d" % k)
            x = mp.NewContinuousVariables(4, "x_%d" % k)
            u_over_time = np.vstack((u_over_time, u))
            x_over_time = np.vstack((x_over_time, x))

        # trajectory is N+1 in size
        x = mp.NewContinuousVariables(4, "x_%d" % (N + 1))
        x_over_time = np.vstack((x_over_time, x))

        # Add cost
        #         for k in range(0, N):
        mp.AddQuadraticCost(u_cost_factor *
                            u_over_time[:, 0].dot(u_over_time[:, 0]))
        mp.AddQuadraticCost(u_cost_factor *
                            u_over_time[:, 1].dot(u_over_time[:, 1]))

        # Add constraint for initial state
        mp.AddLinearConstraint(x_over_time[0, 0] >= state_initial[0])
        mp.AddLinearConstraint(x_over_time[0, 0] <= state_initial[0])
        mp.AddLinearConstraint(x_over_time[0, 1] >= state_initial[1])
        mp.AddLinearConstraint(x_over_time[0, 1] <= state_initial[1])
        mp.AddLinearConstraint(x_over_time[0, 2] >= state_initial[2])
        mp.AddLinearConstraint(x_over_time[0, 2] <= state_initial[2])
        mp.AddLinearConstraint(x_over_time[0, 3] >= state_initial[3])
        mp.AddLinearConstraint(x_over_time[0, 3] <= state_initial[3])

        # Add constraint between x & u
        for k in range(1, N + 1):
            next_step = self.rocket_dynamics(x_over_time[k - 1, :],
                                             u_over_time[k - 1, :])
            mp.AddConstraint(x_over_time[k, 0] == x_over_time[k - 1, 0] +
                             time_step * next_step[0])
            mp.AddConstraint(x_over_time[k, 1] == x_over_time[k - 1, 1] +
                             time_step * next_step[1])
            mp.AddConstraint(x_over_time[k, 2] == x_over_time[k - 1, 2] +
                             time_step * next_step[2])
            mp.AddConstraint(x_over_time[k, 3] == x_over_time[k - 1, 3] +
                             time_step * next_step[3])

        # Make sure it never goes too far from the planets
        # for k in range(1, N):
        #     mp.AddConstraint(self.two_norm(x_over_time[k,0:2] - self.world_2_position[:]) <= 10)
        #     mp.AddConstraint(self.two_norm(x_over_time[k,0:2] - self.world_1_position[:]) <= 10)

        # Make sure u never goes above a threshold
        max_u = 6.
        for k in range(0, N):
            mp.AddLinearConstraint(u_over_time[k, 0] <= max_u)
            mp.AddLinearConstraint(-u_over_time[k, 0] <= max_u)
            mp.AddLinearConstraint(u_over_time[k, 1] <= max_u)
            mp.AddLinearConstraint(-u_over_time[k, 1] <= max_u)

        # Pin the final distance to world 2 at exactly desired_distance (reach the target "orbit")
        mp.AddConstraint(
            self.two_norm(x_over_time[-1, 0:2] -
                          self.world_2_position) <= desired_distance)
        mp.AddConstraint(
            self.two_norm(x_over_time[-1, 0:2] -
                          self.world_2_position) >= desired_distance)

        # Make sure its final speed isn't too high
        mp.AddConstraint(self.two_norm(x_over_time[-1, 2:4]) <= max_speed**2.)

        # Get result
        result = Solve(mp)
        x_over_time_result = result.GetSolution(x_over_time)
        u_over_time_result = result.GetSolution(u_over_time)
        print("Success", result.is_success())
        print("Final position", x_over_time_result[-1, :])
        print(
            "Final distance to world2",
            self.two_norm(x_over_time_result[-1, 0:2] - self.world_2_position))
        print("Final speed", self.two_norm(x_over_time_result[-1, 2:4]))
        print("Fuel consumption", (u_over_time_result**2.).sum())

        trajectory = x_over_time_result
        input_trajectory = u_over_time_result
        return trajectory, input_trajectory, time_array
Ejemplo n.º 27
0
    def ComputeControlInput(self, x, t):
        # Set up things you might want...
        q = x[0:self.nq]
        v = x[self.nq:]

        kinsol = self.hand.doKinematics(x[0:self.hand.get_num_positions()])
        M = self.hand.massMatrix(kinsol)
        C = self.hand.dynamicsBiasTerm(kinsol, {}, None)
        B = self.hand.B

        # Assume grasp points are achieved, and calculate
        # contact jacobian at those points, as well as the current
        # grasp points and normals (in WORLD FRAME).
        grasp_points_world_now = self.transform_grasp_points_manipuland(q)
        grasp_normals_world_now = self.transform_grasp_normals_manipuland(q)
        J_manipuland = jacobian(self.transform_grasp_points_manipuland, q)
        ee_points_now = self.transform_grasp_points_fingers(q)
        J_manipulator = jacobian(self.transform_grasp_points_fingers, q)

        # The contact jacobian (Phi), for contact forces coming from
        # point contacts, describes how the movement of the contact point
        # maps into the joint coordinates of the robot. We can get this
        # by combining the jacobians derived from forward kinematics to each
        # of the two bodies that are in contact, evaluated at the contact point
        # in each body's frame.
        J_contact = J_manipuland - J_manipulator
        # Cut off the 3rd dimension, which should always be 0
        J_contact = J_contact[0:2, :, :]
        # Given these, the manipulator equations enter as a linear constraint
        # M qdd + C = Bu + J_contact lambda
        # (the combined effects of lambda on the robot).
        # The list of grasp points, in manipuland frame, that we'll use.
        n_cf = len(self.grasp_points)
        # The evaluated desired manipuland posture.
        manipuland_qdes = self.GetDesiredObjectPosition(t)

        # The desired robot (arm) posture, calculated via inverse kinematics
        # to achieve the desired grasp points on the object in its current
        # target posture.
        qdes, info = self.ComputeTargetPosture(x, manipuland_qdes)
        if info != 1:
            if not self.shut_up:
                print "Warning: target posture IK solve got info %d " \
                      "when computing goal posture at least once during " \
                      "simulation. This means the grasp points was hard to " \
                      "achieve given the current object posture. This is " \
                      "occasionally OK, but indicates that your controller " \
                      "is probably struggling a little." % info
                self.shut_up = True

        # From here, it's up to you. Following the guidelines in the problem
        # set, implement a controller to achieve the specified goals.

        mp = MathematicalProgram()

        #control variables = u = self.nu x 1
        #ddot{q} as variable w/ q's shape
        controls = mp.NewContinuousVariables(self.nu, "controls")
        qdd = mp.NewContinuousVariables(q.shape[0], "qdd")

        #make variables for lambda's and beta's
        k = 0
        b = mp.NewContinuousVariables(2, "betas_%d" % k)
        betas = b
        for i in range(len(self.grasp_points)):
            b = mp.NewContinuousVariables(
                2, "betas_%d" % k)  #pair of betas for each contact point
            betas = np.vstack((betas, b))
            mp.AddLinearConstraint(
                betas[i, 0] >= 0).evaluator().set_description(
                    "beta_0 greater than 0 for %d" % i)
            mp.AddLinearConstraint(
                betas[i, 1] >= 0).evaluator().set_description(
                    "beta_1 greater than 0 for %d" % i)
        # Friction cone edges: c_1 = normal + mu*tangent, c_2 = normal - mu*tangent
        n = grasp_normals_world_now.T[:, 0:2]  # each row is a contact normal (x, y)
        t = get_perpendicular2d(n[0])
        c_i1 = n[0] + np.dot(self.mu, t)
        c_i2 = n[0] - np.dot(self.mu, t)
        l = c_i1.dot(betas[0, 0]) + c_i2.dot(betas[0, 1])
        lambdas = l

        for i in range(1, len(self.grasp_points)):
            n = grasp_normals_world_now.T[:, 0:2]  # each row is a contact normal (x, y)
            t = get_perpendicular2d(n[i])
            c_i1 = n[i] - np.dot(self.mu, t)
            c_i2 = n[i] + np.dot(self.mu, t)
            l = c_i1.dot(betas[i, 0]) + c_i2.dot(betas[i, 1])
            lambdas = np.vstack((lambdas, l))

        # Constrain the dynamics: M*qdd + C - B*u must be balanced by the contact forces
        dyn = M.dot(qdd) + C - B.dot(controls)
        for i in range(q.shape[0]):
            j_c = 0
            for l in range(len(self.grasp_points)):
                j_sub = J_contact[:, l, :].T
                j_c += j_sub.dot(lambdas[l])


            # print(j_c.shape)
            # print(dyn.shape)
            mp.AddConstraint(dyn[i] - j_c[i] == 0)

        #PD controller using kinematics
        Kp = 100.0
        Kd = 1.0
        control_period = 0.0333
        next_q = q + v * control_period + 0.5 * qdd * control_period**2
        next_q_dot = v + qdd * control_period
        mp.AddQuadraticCost(Kp * (qdes - next_q).dot((qdes - next_q).T) + Kd *
                            (next_q_dot).dot(next_q_dot.T))
        Kp_ext = 100.
        mp.AddQuadraticCost(Kp_ext * (qdes - next_q)[-3:].dot(
            (qdes - next_q)[-3:].T))

        res = Solve(mp)
        c = res.GetSolution(controls)
        return c
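The contact forces above are parameterized by the two edges of a planar friction cone: each lambda is a nonnegative combination beta_0*(n + mu*t) + beta_1*(n - mu*t), which keeps the tangential component within mu times the normal component. A standalone sketch of that decomposition with illustrative values:

import numpy as np
from pydrake.all import MathematicalProgram

mp = MathematicalProgram()
mu = 0.5
n = np.array([0., 1.])   # contact normal (illustrative)
t = np.array([1., 0.])   # tangent, perpendicular to n
betas = mp.NewContinuousVariables(2, 'betas')
mp.AddLinearConstraint(betas[0] >= 0)
mp.AddLinearConstraint(betas[1] >= 0)
# Any force of this form lies inside the friction cone |f_t| <= mu * f_n.
contact_force = betas[0] * (n + mu * t) + betas[1] * (n - mu * t)
# contact_force would then multiply the contact Jacobian transpose in the manipulator equations.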
Ejemplo n.º 28
0
def mixed_integer_quadratic_program(nc, H, f, A, b, C=None, d=None, **kwargs):
    """
    Solves the strictly convex (H > 0) mixed-integer quadratic program min .5 x' H x + f' x s.t. A x <= b, C x  = d.
    The first nc variables in x are continuous, the remaining are binaries.

    Arguments
    ----------
    nc : int
        Number of continuous variables in the problem.
    H : numpy.ndarray
        Positive definite Hessian of the cost function.
    f : numpy.ndarray
        Gradient of the cost function.
    A : numpy.ndarray
        Left-hand side of the inequality constraints.
    b : numpy.ndarray
        Right-hand side of the inequality constraints.
    C : numpy.ndarray
        Left-hand side of the equality constraints.
    d : numpy.ndarray
        Right-hand side of the equality constraints.

    Returns
    ----------
    sol : dict
        Dictionary with the solution of the MIQP.

        Fields
        ----------
        min : float
            Minimum of the MIQP (None if the problem is unfeasible).
        argmin : numpy.ndarray
            Argument that minimizes the MIQP (None if the problem is unfeasible).
    """

    # check equalities
    if (C is None) != (d is None):
        raise ValueError('missing C or d.')

    # problem size
    n_ineq, n_x = A.shape
    if C is not None:
        n_eq = C.shape[0]
    else:
        n_eq = 0

    # build program
    prog = MathematicalProgram()
    x = np.hstack((
        prog.NewContinuousVariables(nc),
        prog.NewBinaryVariables(n_x - nc)
        ))
    [prog.AddLinearConstraint(A[i].dot(x) <= b[i]) for i in range(n_ineq)]
    [prog.AddLinearConstraint(C[i].dot(x) == d[i]) for i in range(n_eq)]
    prog.AddQuadraticCost(.5*x.dot(H).dot(x) + f.dot(x))

    # solve
    solver = GurobiSolver()
    prog.SetSolverOption(solver.solver_type(), 'OutputFlag', 0)
    [prog.SetSolverOption(solver.solver_type(), parameter, value) for parameter, value in kwargs.items()]
    result = prog.Solve()

    # initialize output
    sol = {
        'min': None,
        'argmin': None
    }

    if result == SolutionResult.kSolutionFound:
        sol['argmin'] = prog.GetSolution(x)
        sol['min'] = .5*sol['argmin'].dot(H).dot(sol['argmin']) + f.dot(sol['argmin'])

    return sol
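A small usage sketch for the helper above (assumes a Gurobi installation; the data is illustrative): one continuous and one binary variable, minimizing a strictly convex quadratic subject to x0 + x1 >= 1 written as -x0 - x1 <= -1.

import numpy as np

H = np.eye(2)               # strictly convex Hessian
f = np.array([-1., 0.])
A = np.array([[-1., -1.]])  # -x0 - x1 <= -1, i.e. x0 + x1 >= 1
b = np.array([-1.])
sol = mixed_integer_quadratic_program(nc=1, H=H, f=f, A=A, b=b)
print(sol['min'], sol['argmin'])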
    def compute_trajectory_to_other_world(self, state_initial, minimum_time,
                                          maximum_time):
        '''
        Your mission is to implement this function.

        A successful implementation of this function will compute a dynamically feasible trajectory
        which satisfies these criteria:
            - Efficiently conserve fuel
            - Reach "orbit" of the far right world
            - Approximately obey the dynamic constraints
            - Begin at the state_initial provided
            - Take no more than maximum_time, no less than minimum_time

        The above are defined more precisely in the provided notebook.

        Please note there are three return args.

        :param state_initial: numpy array of length 4, see rocket_dynamics for documentation
        :param: minimum_time: float, minimum time allowed for trajectory
        :param: maximum_time: float, maximum time allowed for trajectory

        :return: three return args separated by commas:

            trajectory, input_trajectory, time_array

            trajectory: a 2d array with N rows, and 4 columns. See simulate_states_over_time for more documentation.
            input_trajectory: a 2d array with N-1 row, and 2 columns. See simulate_states_over_time for more documentation.
            time_array: an array with N rows. 

        '''

        # length of horizon (excluding init state)
        N = 50
        trajectory = np.zeros((N + 1, 4))
        input_trajectory = np.ones((N, 2)) * 10.0

        ### My implementation of Direct Transcription
        # (states and control are all decision vars using Euler integration)
        mp = MathematicalProgram()

        # let trajectory duration be a decision var
        total_time = mp.NewContinuousVariables(1, "total_time")
        dt = total_time[0] / N
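        # Note: dt is a symbolic expression of the decision variable total_time, so the
        # Euler dynamics constraints below are nonlinear (bilinear in dt and the state/input).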

        # create the control decision var (m*N) and state decision var (n*[N+1])
        idx = 0
        u_list = mp.NewContinuousVariables(2, "u_{}".format(idx))
        state_list = mp.NewContinuousVariables(4, "state_{}".format(idx))
        state_list = np.vstack(
            (state_list,
             mp.NewContinuousVariables(4, "state_{}".format(idx + 1))))
        for idx in range(1, N):
            u_list = np.vstack(
                (u_list, mp.NewContinuousVariables(2, "u_{}".format(idx))))
            state_list = np.vstack(
                (state_list,
                 mp.NewContinuousVariables(4, "state_{}".format(idx + 1))))

        ### Constraints
        # set init state as constraint on stage 0 decision vars
        for state_idx in range(4):
            mp.AddLinearConstraint(
                state_list[0, state_idx] == state_initial[state_idx])

        # interstage equality constraint on state vars via Euler integration
        # note: Direct Collocation could improve accuracy for same computation
        for idx in range(1, N + 1):
            state_new = state_list[idx - 1, :] + dt * self.rocket_dynamics(
                state_list[idx - 1, :], u_list[idx - 1, :])
            for state_idx in range(4):
                mp.AddConstraint(state_list[idx,
                                            state_idx] == state_new[state_idx])

        # constraint on time
        mp.AddLinearConstraint(total_time[0] <= maximum_time)
        mp.AddLinearConstraint(total_time[0] >= minimum_time)

        # constraint on final state distance (squared)to second planet
        final_dist_to_world_2_sq = (
            self.world_2_position[0] - state_list[-1, 0])**2 + (
                self.world_2_position[1] - state_list[-1, 1])**2
        mp.AddConstraint(final_dist_to_world_2_sq <= 0.25)

        # constraint on final state speed (squared
        final_speed_sq = state_list[-1, 2]**2 + state_list[-1, 3]**2
        mp.AddConstraint(final_speed_sq <= 1)

        ### Cost
        # equal cost on vertical/horizontal accels, weight shouldn't matter since this is the only cost
        mp.AddQuadraticCost(1 * u_list[:, 0].dot(u_list[:, 0]))
        mp.AddQuadraticCost(1 * u_list[:, 1].dot(u_list[:, 1]))

        ### Solve and parse
        result = Solve(mp)
        trajectory = result.GetSolution(state_list)
        input_trajectory = result.GetSolution(u_list)
        tf = result.GetSolution(total_time)
        time_array = np.linspace(0, tf[0], N + 1)

        print "optimization successful: ", result.is_success()
        print "total num decision vars (x: (N+1)*4, u: 2N, total_time: 1): {}".format(
            mp.num_vars())
        print "solver used: ", result.get_solver_id().name()
        print "optimal trajectory time: {:.2f} sec".format(tf[0])

        return trajectory, input_trajectory, time_array
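The note above about direct collocation can be made concrete: replacing the explicit-Euler update with a trapezoidal integration constraint typically improves accuracy for the same number of knots. A hedged sketch of a drop-in replacement for the inter-stage loop (same names as in the method above; the control is held constant over each interval):

        # Trapezoidal integration between consecutive knot points.
        for idx in range(1, N + 1):
            f_prev = self.rocket_dynamics(state_list[idx - 1, :], u_list[idx - 1, :])
            f_next = self.rocket_dynamics(state_list[idx, :], u_list[idx - 1, :])
            for state_idx in range(4):
                mp.AddConstraint(state_list[idx, state_idx] ==
                                 state_list[idx - 1, state_idx] +
                                 0.5 * dt * (f_prev[state_idx] + f_next[state_idx]))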
Ejemplo n.º 30
0
    def compute_opt_trajectory(self, state_initial, goal_func, verbose=True):
        '''
        nonlinear trajectory optimization to land drone starting at state_initial, on a vehicle target trajectory given by the goal_func

        :return: three return args separated by commas:

            trajectory, input_trajectory, time_array

            trajectory: a 2d array with N rows, and 6 columns. See simulate_states_over_time for more documentation.
            input_trajectory: a 2d array with N-1 row, and 4 columns. See simulate_states_over_time for more documentation.
            time_array: an array with N rows. 
        '''
        # initialize math program
        import time
        start_time = time.time()
        mp = MathematicalProgram()
        num_time_steps = 40

        booster = mp.NewContinuousVariables(3, "booster_0")
        booster_over_time = booster[np.newaxis, :]

        state = mp.NewContinuousVariables(6, "state_0")
        state_over_time = state[np.newaxis, :]

        dt = mp.NewContinuousVariables(1, "dt")

        for k in range(1, num_time_steps - 1):
            booster = mp.NewContinuousVariables(3, "booster_%d" % k)
            booster_over_time = np.vstack((booster_over_time, booster))
        for k in range(1, num_time_steps):
            state = mp.NewContinuousVariables(6, "state_%d" % k)
            state_over_time = np.vstack((state_over_time, state))

        goal_state = goal_func(dt[0] * (num_time_steps - 1))  # goal state at the final knot time

        # calculate states over time
        for i in range(1, num_time_steps):
            sim_next_state = state_over_time[
                i - 1, :] + dt[0] * self.drone_dynamics(
                    state_over_time[i - 1, :], booster_over_time[i - 1, :])

            # add constraints to restrict the next state to the decision variables
            for j in range(6):
                mp.AddConstraint(sim_next_state[j] == state_over_time[i, j])

            # don't hit ground
            mp.AddLinearConstraint(state_over_time[i, 2] >= 0.05)

            # bound the lateral thrust components
            mp.AddLinearConstraint(booster_over_time[i - 1, 0] <= 5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 0] >= -5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 1] <= 5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 1] >= -5.0)

            # keep the thrust vector inside a cone: lateral components bounded by the vertical component
            mp.AddLinearConstraint(booster_over_time[i - 1, 2] >= 1.0)
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 0] <= booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 0] >= -booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 1] <= booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 1] >= -booster_over_time[i - 1, 2])

        # add constraints on initial state
        for i in range(6):
            mp.AddLinearConstraint(state_over_time[0, i] == state_initial[i])

        # add constraints on dt
        mp.AddLinearConstraint(dt[0] >= 0.001)

        # add constraints on end state

        # final state must match the goal state (position and velocity) to within 0.01
        mp.AddConstraint(state_over_time[-1, 0] <= goal_state[0] + 0.01)
        mp.AddConstraint(state_over_time[-1, 0] >= goal_state[0] - 0.01)
        mp.AddConstraint(state_over_time[-1, 1] <= goal_state[1] + 0.01)
        mp.AddConstraint(state_over_time[-1, 1] >= goal_state[1] - 0.01)
        mp.AddConstraint(state_over_time[-1, 2] <= goal_state[2] + 0.01)
        mp.AddConstraint(state_over_time[-1, 2] >= goal_state[2] - 0.01)
        mp.AddConstraint(state_over_time[-1, 3] <= goal_state[3] + 0.01)
        mp.AddConstraint(state_over_time[-1, 3] >= goal_state[3] - 0.01)
        mp.AddConstraint(state_over_time[-1, 4] <= goal_state[4] + 0.01)
        mp.AddConstraint(state_over_time[-1, 4] >= goal_state[4] - 0.01)
        mp.AddConstraint(state_over_time[-1, 5] <= goal_state[5] + 0.01)
        mp.AddConstraint(state_over_time[-1, 5] >= goal_state[5] - 0.01)

        # add the cost function
        mp.AddQuadraticCost(
            0.01 * booster_over_time[:, 0].dot(booster_over_time[:, 0]))
        mp.AddQuadraticCost(
            0.01 * booster_over_time[:, 1].dot(booster_over_time[:, 1]))
        mp.AddQuadraticCost(
            0.01 * booster_over_time[:, 2].dot(booster_over_time[:, 2]))

        # add more penalty on dt because minimizing time turns out to be more important
        mp.AddQuadraticCost(10000 * dt[0] * dt[0])

        solved = mp.Solve()
        if verbose:
            print(solved)

        # extract
        booster_over_time = mp.GetSolution(booster_over_time)
        output_states = mp.GetSolution(state_over_time)
        dt = mp.GetSolution(dt)

        time_array = np.zeros(num_time_steps)
        for i in range(num_time_steps):
            time_array[i] = i * dt[0]
        trajectory = self.simulate_states_over_time(state_initial, time_array,
                                                    booster_over_time)

        durations = time_array[1:] - time_array[:-1]
        fuel_consumption = (
            np.sum(booster_over_time[:len(time_array)]**2, axis=1) *
            durations).sum()

        if verbose:
            print('expected remaining fuel consumption', fuel_consumption)
            print("took %s seconds" % (time.time() - start_time))
            print('')

        return trajectory, booster_over_time, time_array, fuel_consumption