Code Example #1
def run_complex_mathematical_program():
    print("\n\ncomplex mathematical program")
    mp = MathematicalProgram()
    x = mp.NewContinuousVariables(1, 'x')
    # `cost` and `quad_constraint` are assumed to be defined elsewhere in this file.
    mp.AddCost(cost, x)
    mp.AddConstraint(quad_constraint, [1.0], [2.0], x)
    mp.SetInitialGuess(x, [1.1])
    result = Solve(mp)
    print(result.get_solution_result())
    res = result.GetSolution(x)
    print(res)

    print("(finished) complex mathematical program")
Code Example #2
	def optimization(self, quad_plants, ball_plants, contexts):
		prog = MathematicalProgram()
		#Time steps likely requires a line search. 
		#Find the number of time steps until a ball reaches a threshold so that a quadrotor can catch it.
		N = 200 #Number of time steps
		self.n_u = 2*len(quad_plants)  #two thrust inputs per quadrotor; the balls are unactuated
		self.n_x = 6*len(quad_plants) + 4*len(ball_plants)
		n_quads = len(quad_plants)
		n_balls = len(ball_plants)
		u = np.empty((self.n_u, N-1), dtype=Variable)
		x = np.empty((self.n_x, N), dtype=Variable)
		I =  0.00383
		r = 0.25
		mass = 0.486
		g = 9.81
		#Add all the decision variables
		for n in range(N-1):
			u[:,n] = prog.NewContinuousVariables(self.n_u, 'u' + str(n))
			x[:,n] = prog.NewContinuousVariables(self.n_x, 'x' + str(n))
		x[:,N-1] = prog.NewContinuousVariables(self.n_x, 'x' + str(N-1))

		#Start at the correct initial conditions
		#Connect quadrotor and ball state variables to the initial condition of mathprog
		x0 = np.empty((self.n_x,1))
		for k in range(n_quads + n_balls):
			if k < n_quads:
			x0[6*k:6*k+6] = self.quad_plants[k] #??? should be the k-th quadrotor's initial state
		prog.AddBoundingBoxConstraint(x0, x0, x[:,0])
		for n in range(N-1):
			for k in range(n_quads + n_balls):
				#Handles no collisions
				if k < n_quads:
					#Quad dynamic constraints
					dynamics_constraint_vel = None  #TODO: quadrotor velocity-update residuals (left unspecified here)
					dynamics_constraint_acc = None  #TODO: quadrotor acceleration-update residuals (left unspecified here)
					prog.AddConstraint(dynamics_constraint_vel[0, 0])
					prog.AddConstraint(dynamics_constraint_vel[1, 0])
					prog.AddConstraint(dynamics_constraint_vel[2, 0])
					prog.AddConstraint(dynamics_constraint_acc[0, 0])
					prog.AddConstraint(dynamics_constraint_acc[1, 0])
					prog.AddConstraint(dynamics_constraint_acc[2, 0])
				else:
					#Ball dynamic constraints
					dynamics_constraint_vel = None  #TODO: ball velocity-update residuals (left unspecified here)
					dynamics_constraint_acc = None  #TODO: ball acceleration-update residuals (left unspecified here)
					prog.AddConstraint(dynamics_constraint_vel[0, 0])
					prog.AddConstraint(dynamics_constraint_vel[1, 0])
					prog.AddConstraint(dynamics_constraint_acc[0, 0])
					prog.AddConstraint(dynamics_constraint_acc[1, 0])
Code Example #3
    def test_create_constraint_input_array(self):
        prog = MathematicalProgram()
        q = prog.NewContinuousVariables(1, 'q')
        r = prog.NewContinuousVariables(rows=2, cols=3, name='r')
        q_val = 0.5
        r_val = np.arange(6).reshape((2,3))
        '''
        array([[0, 1, 2],
               [3, 4, 5]])
        '''

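        # create_constraint_input_array is expected to flatten the provided named values
        # into the order of the constraint's bound variables: here (q, r[0, 0], r[1, 1]).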
        constraint = prog.AddConstraint(le(q, r[0,0] + 2*r[1,1]))
        input_array = create_constraint_input_array(constraint, {
            "q": q_val,
            "r": r_val
        })
        expected_input_array = [0.5, 0, 4]
        np.testing.assert_array_almost_equal(input_array, expected_input_array)

        z = prog.NewContinuousVariables(2, 'z')
        z_val = np.array([0.0, 0.5])
        # https://stackoverflow.com/questions/64736910/using-le-or-ge-with-scalar-left-hand-side-creates-unsized-formula-array
        # constraint = prog.AddConstraint(le(z[1], r[0,2] + 2*r[1,0]))
        constraint = prog.AddConstraint(le([z[1]], r[0,2] + 2*r[1,0]))
        input_array = create_constraint_input_array(constraint, {
            "z": z_val,
            "r": r_val
        })
        expected_input_array = [3, 2, 0.5]
        np.testing.assert_array_almost_equal(input_array, expected_input_array)

        a = prog.NewContinuousVariables(rows=2, cols=1, name='a')
        a_val = np.array([[1.0], [2.0]])
        constraint = prog.AddConstraint(eq(a[0], a[1]))
        input_array = create_constraint_input_array(constraint, {
            "a": a_val
        })
        expected_input_array = [1.0, 2.0]
        np.testing.assert_array_almost_equal(input_array, expected_input_array)
Code Example #4
import math

from pydrake.all import Jacobian, MathematicalProgram, Solve


def dynamics(x):
    return -x + x**3


prog = MathematicalProgram()
x = prog.NewIndeterminates(1, "x")
rho = prog.NewContinuousVariables(1, "rho")[0]

# Define the Lyapunov function.
V = x.dot(x)
Vdot = Jacobian([V], x).dot(dynamics(x))[0]

# Define the Lagrange multiplier.
lambda_ = prog.NewContinuousVariables(1, "lambda")[0]
prog.AddConstraint(lambda_ >= 0)

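# Requiring (V - rho) * x'x - lambda * Vdot to be a sum of squares (with lambda >= 0)
# certifies that Vdot < 0 wherever 0 < V(x) < rho, so the sublevel set {V < rho} is an
# inner estimate of the region of attraction.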
prog.AddSosConstraint((V - rho) * x.dot(x) - lambda_ * Vdot)
prog.AddLinearCost(-rho)

result = Solve(prog)

assert result.is_success()

print("Verified that " + str(V) + " < " + str(result.GetSolution(rho)) +
      " is in the region of attraction.")

assert math.fabs(result.GetSolution(rho) - 1) < 1e-5
Code Example #5
    def bounce_pass_wall(self,
                         p_puck,
                         p_goal,
                         which_wall,
                         duration=3,
                         debug=True):
        """Solve for initial velocity for the puck to bounce the wall once and hit the goal."""

        # initialize program
        prog = MathematicalProgram()

        # time periods before and after hitting the wall
        h = prog.NewContinuousVariables(2, name='h')

        # velocities of the ball at the start of each segment (after collision).
        vel_start = prog.NewContinuousVariables(2, name='vel_start')
        vel_after = prog.NewContinuousVariables(2, name='vel_after')

        # sum of durations cannot exceed the specified value
        prog.AddConstraint(np.sum(h) <= duration)

        # Help the solver by telling it initial velocity direction
        self.add_initial_vel_direction_constraint(prog, which_wall, p_goal,
                                                  vel_start)

        # add dynamics constraints for the moment the ball hits the wall
        p_contact = self.next_p(p_puck, vel_start, h[0])
        v_contact = self.next_vel(vel_start, h[0])

        # keep the same x vel, but flip the y vel
        self.add_reset_map_constraint(prog, which_wall, p_contact, v_contact,
                                      vel_after)

        # in the last segment, need to specify bounds for the final position and velocity of the ball
        p_end = self.next_p(p_contact, vel_after, h[1])
        v_end = self.next_vel(vel_after, h[1])
        self.add_goal_constraint(prog, which_wall, p_goal, p_end, v_end)

        # solve for time periods h
        result = Solve(prog)
        if not result.is_success():
            if debug:
                infeasible = GetInfeasibleConstraints(prog, result)
                print("Infeasible constraints in ContactOptimizer:")
                for i in range(len(infeasible)):
                    print(infeasible[i])
            # return directly
            return
        else:
            if debug:
                print("vel_start:{}".format(result.GetSolution(vel_start)))
                print("vel_after: {}".format(result.GetSolution(vel_after)))
                print("h: {}".format(result.GetSolution(h)))
                p1 = self.next_p(p_puck, result.GetSolution(vel_start),
                                 result.GetSolution(h[0]))
                p2 = self.next_p(p1, result.GetSolution(vel_after),
                                 result.GetSolution(h[1]))
                print("p1:{}".format(p1))
                print("p2:{}".format(p2))
                v_end = self.next_vel(result.GetSolution(vel_after),
                                      result.GetSolution(h[1]))
                print("v_end:{}".format(v_end))

        # return whether successful, and the initial puck velocity
        return result.is_success(), result.GetSolution(vel_start)
Code Example #6
    def diff_drive_pd_mp(
            self, x,
            x_des):  # x_des = [x, theta, yaw, x_dot, theta_dot, yaw_dot]
        m_s = 0.2  #kg
        d = 0.085  #m
        m_c = 0.056
        I_3 = 0.000228373  #.0000989844 #kg*m^2
        I_2 = .0000989844
        R = 0.0333375
        g = -9.81  #sign may need to be flipped to +9.81; test to see
        L = 0.03
        A_val_theta = (m_s * d * g *
                       (3 * m_c + m_s)) / (3 * m_c * I_3 +
                                           3 * m_c * m_s * d**2 + m_s * I_3)
        B_val_theta = (-m_s * d / R - 3 * m_c * m_s) / (
            3 * m_c * I_3 + 3 * m_c * m_s * d**2 + m_s *
            I_3) * math.pi / 180  #multiplicand for u to change in theta_dot
        B_val_phi = -(2 * L / R) / (6 * m_c * L**2 + m_c * R**2 + 2 * I_2)
        mp = MathematicalProgram()
        k = mp.NewContinuousVariables(3, 'k_val')  # [kp, kd, ky]
        u = mp.NewContinuousVariables(2, 'u_val')
        theta_dot_post = mp.NewContinuousVariables(
            1,
            'theta_dot_post_val')  #estimated value of theta dot after control
        phi_dot_post = mp.NewContinuousVariables(
            1, 'phi_dot_post_val')  #estimated value of phi dot after control
        '''
        kp = 1. #regulate theta (pitch) position
        kd = kp / 20. #regulate theta (pitch) velocity
        ky = 0.5 #regulate phi (yaw) position
        '''
        actuator_limit = .1  #estimate; 0.1 is probably high
        #mp.AddConstraint(theta[0] == np.arcsin(2*(x[0]*x[2] - x[1]*x[3]))) #pitch
        theta = np.arcsin(2 * (x[0] * x[2] - x[1] * x[3]))
        phi = np.arctan2(2 * (x[0] * x[1] + x[2] * x[3]),
                         1 - 2 * (x[1]**2 + x[2]**2))  #yaw
        theta_dot = x[
            10]  #Shown to be ~1.5% below true theta_dot on average in experiments
        mp.AddConstraint(k[0] >= 0.)
        mp.AddConstraint(k[1] >= 0.)
        mp.AddConstraint(k[2] >= 0.)
        mp.AddConstraint(u[0] == k[0] * (x_des[1] - theta + k[1] *
                                         (x_des[4] - theta_dot)) + k[2] *
                         (x_des[2] - phi))
        mp.AddConstraint(u[0] <= actuator_limit)
        mp.AddConstraint(u[0] >= -actuator_limit)
        mp.AddConstraint(u[1] == -u[0])
        mp.AddConstraint(theta_dot_post[0] == theta_dot +
                         B_val_theta * u[0] * 0.0005 +
                         A_val_theta * theta * .0005)
        mp.AddConstraint(phi_dot_post[0] == x[11] + B_val_phi * u[0] * .0005)
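        # Desired pitch rate: close the pitch error over two 0.5 ms control steps.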
        theta_dot_des = -(theta - x_des[1]) / (2 * .0005)
        mp.AddQuadraticCost((theta_dot_post[0] - theta_dot_des)**2)
        phi_dot_des = -(phi - x_des[2]) / 2
        print('theta_dot_des', theta_dot_des)
        print('theta', theta)
        mp.AddQuadraticCost(0.001 * (phi_dot_post[0] - phi_dot_des)**2)

        result = Solve(mp)
        print('Success: ', result.is_success())
        print('Solver id: ', result.get_solver_id().name())
        print('k: ', result.GetSolution(k))
        print('u: ', result.GetSolution(u))
        return result.GetSolution(u)
Code Example #7
def trajopt_simulation(x0, xf, u_max=0.5):
	# numeric parameters
	time_interval = .1
	time_steps = 400

	# initialize optimization
	prog = MathematicalProgram()

	# optimization variables
	state = prog.NewContinuousVariables(time_steps + 1, 3, 'state')
	u = prog.NewContinuousVariables(time_steps, 1, 'u')

	# final position constraint
	# for residual in constraint_state_to_orbit(state[-2], state[-1], xf, 1/u_max, time_interval):
	# 	prog.AddConstraint(residual == 0)

	# initial position constraint
	prog.AddConstraint(eq(state[0],x0))

	# discretized dynamics
	for t in range(time_steps):
		residuals = dubins_discrete_dynamics(state[t], state[t+1], u[t], time_interval)
		for residual in residuals:
			prog.AddConstraint(residual == 0)

		# control limits
		prog.AddConstraint(u[t][0] <= u_max)
		prog.AddConstraint(-u_max <= u[t][0])

		# cost - increases cost if off the orbit
		p = state[t][:2] - xf[:2]
		v = (state[t+1][:2] - state[t][:2]) / time_interval
		# prog.AddCost(time_interval)
		# prog.AddCost(u[t][0]*u[t][0]*time_interval)
		# prog.AddCost(p.dot(v)*time_interval)
		for residual in constraint_state_to_orbit(state[t], state[t+1], xf, 1/u_max, time_interval):
			# prog.AddConstraint(residual >= 0)
			prog.AddQuadraticCost(residual)
		# prog.AddCost((p.dot(p)))

	# initial guess
	state_guess = interpolate_dubins_state(
		np.array([0, 0, np.pi]),
		xf,
		time_steps, 
		time_interval,
		1/u_max
		)

	prog.SetInitialGuess(state, state_guess)

	solver = SnoptSolver()
	result = solver.Solve(prog)

	assert result.is_success()

	# retrieve optimal solution
	u_opt = result.GetSolution(u).T
	state_opt = result.GetSolution(state).T

	return state_opt, u_opt
Code Example #8
    def CalcOutput(self, context, output):
        # ============================ LOAD INPUTS ============================
        # Torque inputs
        tau_g = np.expand_dims(
            np.array(self.GetInputPort("tau_g").Eval(context)), 1)
        joint_centering_torque = np.expand_dims(
            np.array(
                self.GetInputPort("joint_centering_torque").Eval(context)), 1)

        # Force inputs
        # PROGRAMMING: Add zeros here?
        F_GT = self.GetInputPort("F_GT").Eval(context)[0]
        F_GN = self.GetInputPort("F_GN").Eval(context)[0]

        # Positions
        theta_L = self.GetInputPort("theta_L").Eval(context)[0]
        theta_MX = self.GetInputPort("theta_MX").Eval(context)[0]
        theta_MY = self.GetInputPort("theta_MY").Eval(context)[0]
        theta_MZ = self.GetInputPort("theta_MZ").Eval(context)[0]
        p_CT = self.GetInputPort("p_CT").Eval(context)[0]
        p_CN = self.GetInputPort("p_CN").Eval(context)[0]
        p_LT = self.GetInputPort("p_LT").Eval(context)[0]
        p_LN = self.GetInputPort("p_LN").Eval(context)[0]
        d_T = self.GetInputPort("d_T").Eval(context)[0]
        d_N = self.GetInputPort("d_N").Eval(context)[0]
        d_X = self.GetInputPort("d_X").Eval(context)[0]
        p_MConM = np.array([self.GetInputPort("p_MConM").Eval(context)]).T

        # Velocities
        d_theta_L = self.GetInputPort("d_theta_L").Eval(context)[0]
        d_theta_MX = self.GetInputPort("d_theta_MX").Eval(context)[0]
        d_theta_MY = self.GetInputPort("d_theta_MY").Eval(context)[0]
        d_theta_MZ = self.GetInputPort("d_theta_MZ").Eval(context)[0]
        d_d_T = self.GetInputPort("d_d_T").Eval(context)[0]
        d_d_N = self.GetInputPort("d_d_N").Eval(context)[0]
        d_d_X = self.GetInputPort("d_d_X").Eval(context)[0]

        # Manipulator inputs
        J = np.array(self.GetInputPort("J").Eval(context)).reshape(
            (6, self.nq))
        J_translational = np.array(
            self.GetInputPort("J_translational").Eval(context)).reshape(
                (3, self.nq))
        J_rotational = np.array(
            self.GetInputPort("J_rotational").Eval(context)).reshape(
                (3, self.nq))
        Jdot_qdot = np.expand_dims(
            np.array(self.GetInputPort("Jdot_qdot").Eval(context)), 1)
        M = np.array(self.GetInputPort("M").Eval(context)).reshape(
            (self.nq, self.nq))
        Cv = np.expand_dims(np.array(self.GetInputPort("Cv").Eval(context)), 1)

        # Other inputs
        mu_S = self.GetInputPort("mu_S").Eval(context)[0]
        hats_T = self.GetInputPort("hats_T").Eval(context)[0]
        s_hat_X = self.GetInputPort("s_hat_X").Eval(context)[0]

        # ============================= OTHER PREP ============================
        if self.theta_MYd is None:
            self.theta_MYd = theta_MY
        if self.theta_MZd is None:
            self.theta_MZd = theta_MZ

        # Calculate desired values
        dd_d_Td = 1000 * (self.d_Td - d_T) - 100 * d_d_T
        dd_theta_Ld = 10 * (self.d_theta_Ld - d_theta_L)
        a_MX_d = 100 * (self.d_Xd - d_X) - 10 * d_d_X
        theta_MXd = theta_L
        alpha_MXd = 100 * (theta_MXd - theta_MX) + 10 * (d_theta_L -
                                                         d_theta_MX)
        alpha_MYd = 10 * (self.theta_MYd - theta_MY) - 5 * d_theta_MY
        alpha_MZd = 10 * (self.theta_MZd - theta_MZ) - 5 * d_theta_MZ
        dd_d_Nd = 0

        # =========================== SOLVE PROGRAM ===========================
        ## 1. Define an instance of MathematicalProgram
        prog = MathematicalProgram()

        ## 2. Add decision variables
        # Contact values
        F_NM = prog.NewContinuousVariables(1, 1, name="F_NM")
        F_ContactMY = prog.NewContinuousVariables(1, 1, name="F_ContactMY")
        F_ContactMZ = prog.NewContinuousVariables(1, 1, name="F_ContactMZ")
        F_NL = prog.NewContinuousVariables(1, 1, name="F_NL")
        if self.model_friction:
            F_FMT = prog.NewContinuousVariables(1, 1, name="F_FMT")
            F_FMX = prog.NewContinuousVariables(1, 1, name="F_FMX")
            F_FLT = prog.NewContinuousVariables(1, 1, name="F_FLT")
            F_FLX = prog.NewContinuousVariables(1, 1, name="F_FLX")
        else:
            F_FMT = np.array([[0]])
            F_FMX = np.array([[0]])
            F_FLT = np.array([[0]])
            F_FLX = np.array([[0]])
        F_ContactM_XYZ = np.array([F_FMX, F_ContactMY, F_ContactMZ])[:, :, 0]

        # Object forces and torques
        if not self.measure_joint_wrench:
            F_OT = prog.NewContinuousVariables(1, 1, name="F_OT")
            F_ON = prog.NewContinuousVariables(1, 1, name="F_ON")
            tau_O = -self.sys_consts.k_J*theta_L \
                    - self.sys_consts.b_J*d_theta_L

        # Control values
        tau_ctrl = prog.NewContinuousVariables(self.nq, 1, name="tau_ctrl")

        # Object accelerations
        a_MX = prog.NewContinuousVariables(1, 1, name="a_MX")
        a_MT = prog.NewContinuousVariables(1, 1, name="a_MT")
        a_MY = prog.NewContinuousVariables(1, 1, name="a_MY")
        a_MZ = prog.NewContinuousVariables(1, 1, name="a_MZ")
        a_MN = prog.NewContinuousVariables(1, 1, name="a_MN")
        a_LT = prog.NewContinuousVariables(1, 1, name="a_LT")
        a_LN = prog.NewContinuousVariables(1, 1, name="a_LN")
        alpha_MX = prog.NewContinuousVariables(1, 1, name="alpha_MX")
        alpha_MY = prog.NewContinuousVariables(1, 1, name="alpha_MY")
        alpha_MZ = prog.NewContinuousVariables(1, 1, name="alpha_MZ")
        alpha_a_MXYZ = np.array(
            [alpha_MX, alpha_MY, alpha_MZ, a_MX, a_MY, a_MZ])[:, :, 0]

        # Derived accelerations
        dd_theta_L = prog.NewContinuousVariables(1, 1, name="dd_theta_L")
        dd_d_N = prog.NewContinuousVariables(1, 1, name="dd_d_N")
        dd_d_T = prog.NewContinuousVariables(1, 1, name="dd_d_T")

        ddq = prog.NewContinuousVariables(self.nq, 1, name="ddq")

        ## Constraints
        # "set_description" calls gives us useful names for printing
        prog.AddConstraint(eq(
            self.sys_consts.m_L * a_LT, F_FLT + F_GT +
            F_OT)).evaluator().set_description("Link tangential force balance")
        prog.AddConstraint(
            eq(self.sys_consts.m_L * a_LN, F_NL + F_GN +
               F_ON)).evaluator().set_description("Link normal force balance")
        prog.AddConstraint(eq(
            self.sys_consts.I_L*dd_theta_L, \
            (-self.sys_consts.w_L/2)*F_ON - (p_CN-p_LN) * F_FLT + \
                (p_CT-p_LT)*F_NL + tau_O
        )).evaluator().set_description("Link moment balance")
        prog.AddConstraint(eq(
            F_NL, -F_NM)).evaluator().set_description("3rd law normal forces")
        if self.model_friction:
            prog.AddConstraint(eq(F_FMT, -F_FLT)).evaluator().set_description(
                "3rd law friction forces (T hat)")
            prog.AddConstraint(eq(F_FMX, -F_FLX)).evaluator().set_description(
                "3rd law friction forces (X hat)")
        prog.AddConstraint(eq(
            -dd_theta_L*(self.sys_consts.h_L/2+self.sys_consts.r) + \
                d_theta_L**2*self.sys_consts.w_L/2 - a_LT + a_MT,
            -dd_theta_L*d_N + dd_d_T - d_theta_L**2*d_T - 2*d_theta_L*d_d_N
        )).evaluator().set_description("d_N derivative is derivative")
        prog.AddConstraint(eq(
            -dd_theta_L*self.sys_consts.w_L/2 - \
                d_theta_L**2*self.sys_consts.h_L/2 - \
                d_theta_L**2*self.sys_consts.r - a_LN + a_MN,
            dd_theta_L*d_T + dd_d_N - d_theta_L**2*d_N + 2*d_theta_L*d_d_T
        )).evaluator().set_description("d_N derivative is derivative")
        prog.AddConstraint(eq(dd_d_N,
                              0)).evaluator().set_description("No penetration")
        if self.model_friction:
            prog.AddConstraint(
                eq(F_FLT, mu_S * F_NL * self.sys_consts.mu *
                   hats_T)).evaluator().set_description(
                       "Friction relationship LT")
            prog.AddConstraint(
                eq(F_FLX, mu_S * F_NL * self.sys_consts.mu *
                   s_hat_X)).evaluator().set_description(
                       "Friction relationship LX")

        if not self.measure_joint_wrench:
            prog.AddConstraint(
                eq(a_LT, -(self.sys_consts.w_L / 2) * d_theta_L**
                   2)).evaluator().set_description("Hinge constraint (T hat)")
            prog.AddConstraint(eq(a_LN, (self.sys_consts.w_L / 2) *
                                  dd_theta_L)).evaluator().set_description(
                                      "Hinge constraint (N hat)")

        for i in range(6):
            lhs_i = alpha_a_MXYZ[i, 0]
            assert not hasattr(lhs_i, "shape")
            rhs_i = (Jdot_qdot + np.matmul(J, ddq))[i, 0]
            assert not hasattr(rhs_i, "shape")
            prog.AddConstraint(lhs_i == rhs_i).evaluator().set_description(
                "Relate manipulator and end effector with joint " + \
                "accelerations " + str(i))

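        # Map the contact force at M into joint torques: the translational Jacobian carries
        # the force itself, the rotational Jacobian carries the moment p_MConM x F.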
        tau_contact_trn = np.matmul(J_translational.T, F_ContactM_XYZ)
        tau_contact_rot = np.matmul(J_rotational.T,
                                    np.cross(p_MConM, F_ContactM_XYZ, axis=0))
        tau_contact = tau_contact_trn + tau_contact_rot
        tau_out = tau_ctrl - tau_g + joint_centering_torque
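        # Enforce the manipulator equations row by row:
        # (M(q) ddq + Cv)[i] == (tau_g + tau_contact + tau_out)[i].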
        for i in range(self.nq):
            M_ddq_row_i = (np.matmul(M, ddq) + Cv)[i, 0]
            assert not hasattr(M_ddq_row_i, "shape")
            tau_i = (tau_g + tau_contact + tau_out)[i, 0]
            assert not hasattr(tau_i, "shape")
            prog.AddConstraint(M_ddq_row_i == tau_i).evaluator(
            ).set_description("Manipulator equations " + str(i))

        # Projection equations
        prog.AddConstraint(
            eq(a_MT,
               np.cos(theta_L) * a_MY + np.sin(theta_L) * a_MZ))
        prog.AddConstraint(
            eq(a_MN, -np.sin(theta_L) * a_MY + np.cos(theta_L) * a_MZ))
        prog.AddConstraint(
            eq(F_FMT,
               np.cos(theta_L) * F_ContactMY + np.sin(theta_L) * F_ContactMZ))
        prog.AddConstraint(
            eq(F_NM,
               -np.sin(theta_L) * F_ContactMY + np.cos(theta_L) * F_ContactMZ))

        prog.AddConstraint(dd_d_T[0, 0] == dd_d_Td).evaluator(
        ).set_description("Desired dd_d_Td constraint")
        prog.AddConstraint(dd_theta_L[0, 0] == dd_theta_Ld).evaluator(
        ).set_description("Desired dd_theta_L constraint")
        prog.AddConstraint(a_MX[0, 0] == a_MX_d).evaluator(
        ).set_description("Desired a_MX constraint")
        prog.AddConstraint(alpha_MX[0, 0] == alpha_MXd).evaluator(
        ).set_description("Desired alpha_MX constraint")
        prog.AddConstraint(alpha_MY[0, 0] == alpha_MYd).evaluator(
        ).set_description("Desired alpha_MY constraint")
        prog.AddConstraint(alpha_MZ[0, 0] == alpha_MZd).evaluator(
        ).set_description("Desired alpha_MZ constraint")
        prog.AddConstraint(dd_d_N[0, 0] == dd_d_Nd).evaluator(
        ).set_description("Desired dd_d_N constraint")

        result = Solve(prog)
        assert result.is_success()
        tau_ctrl_result = []
        for i in range(self.nq):
            tau_ctrl_result.append(
                result.GetSolution()[prog.FindDecisionVariableIndex(
                    tau_ctrl[i, 0])])
        tau_ctrl_result = np.expand_dims(tau_ctrl_result, 1)

        # ======================== UPDATE DEBUG VALUES ========================
        self.debug["times"].append(context.get_time())

        # control effort
        self.debug["dd_d_Td"].append(dd_d_Td)
        self.debug["dd_theta_Ld"].append(dd_theta_Ld)
        self.debug["a_MX_d"].append(a_MX_d)
        self.debug["alpha_MXd"].append(alpha_MXd)
        self.debug["alpha_MYd"].append(alpha_MYd)
        self.debug["alpha_MZd"].append(alpha_MZd)
        self.debug["dd_d_Nd"].append(dd_d_Nd)

        # decision variables
        if self.model_friction:
            self.debug["F_FMT"].append(
                result.GetSolution()[prog.FindDecisionVariableIndex(F_FMT[0,
                                                                          0])])
            self.debug["F_FMX"].append(
                result.GetSolution()[prog.FindDecisionVariableIndex(F_FMX[0,
                                                                          0])])
            self.debug["F_FLT"].append(
                result.GetSolution()[prog.FindDecisionVariableIndex(F_FLT[0,
                                                                          0])])
            self.debug["F_FLX"].append(
                result.GetSolution()[prog.FindDecisionVariableIndex(F_FLX[0,
                                                                          0])])
        else:
            self.debug["F_FMT"].append(F_FMT)
            self.debug["F_FMX"].append(F_FMX)
            self.debug["F_FLT"].append(F_FLT)
            self.debug["F_FLX"].append(F_FLX)
        self.debug["F_NM"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(F_NM[0, 0])])
        self.debug["F_ContactMY"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(
                F_ContactMY[0, 0])])
        self.debug["F_ContactMZ"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(
                F_ContactMZ[0, 0])])
        self.debug["F_NL"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(F_NL[0, 0])])
        for i in range(self.nq):
            self.debug["tau_ctrl_" + str(i)].append(
                result.GetSolution()[prog.FindDecisionVariableIndex(
                    tau_ctrl[i, 0])])
        self.debug["a_MX"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(a_MX[0, 0])])
        self.debug["a_MT"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(a_MT[0, 0])])
        self.debug["a_MY"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(a_MY[0, 0])])
        self.debug["a_MZ"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(a_MZ[0, 0])])
        self.debug["a_MN"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(a_MN[0, 0])])
        self.debug["a_LT"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(a_LT[0, 0])])
        self.debug["a_LN"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(a_LN[0, 0])])
        self.debug["alpha_MX"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(alpha_MX[0,
                                                                         0])])
        self.debug["alpha_MY"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(alpha_MY[0,
                                                                         0])])
        self.debug["alpha_MZ"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(alpha_MZ[0,
                                                                         0])])
        self.debug["dd_theta_L"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(
                dd_theta_L[0, 0])])
        self.debug["dd_d_N"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(dd_d_N[0, 0])])
        self.debug["dd_d_T"].append(
            result.GetSolution()[prog.FindDecisionVariableIndex(dd_d_T[0, 0])])
        for i in range(self.nq):
            self.debug["ddq_" + str(i)].append(
                result.GetSolution()[prog.FindDecisionVariableIndex(ddq[i,
                                                                        0])])
        self.debug["theta_MXd"].append(theta_MXd)

        output.SetFromVector(tau_ctrl_result.flatten())
Code Example #9
class Optimization:
    def __init__(self, config):
        self.physical_params = config["physical_params"]
        self.T = config["T"]
        self.dt = config["dt"]
        self.initial_state = config["xinit"]
        self.goal_state = config["xgoal"]
        self.ellipse_arc = config["ellipse_arc"]
        self.deviation_cost = config["deviation_cost"]
        self.Qf = config["Qf"]
        self.limits = config["limits"]
        self.n_state = 6
        self.n_nominal_forces = 4
        self.tire_model = config["tire_model"]
        self.initial_guess_config = config["initial_guess_config"]
        self.puddle_model = config["puddle_model"]
        self.force_constraint = config["force_constraint"]
        self.visualize_initial_guess = config["visualize_initial_guess"]
        self.dynamics = Dynamics(self.physical_params.lf,
                                 self.physical_params.lr,
                                 self.physical_params.m,
                                 self.physical_params.Iz, self.dt)

    def build_program(self):
        self.prog = MathematicalProgram()

        # Declare variables.
        state = self.prog.NewContinuousVariables(rows=self.T + 1,
                                                 cols=self.n_state,
                                                 name='state')
        nominal_forces = self.prog.NewContinuousVariables(
            rows=self.T, cols=self.n_nominal_forces, name='nominal_forces')
        steers = self.prog.NewContinuousVariables(rows=self.T, name="steers")
        slip_ratios = self.prog.NewContinuousVariables(rows=self.T,
                                                       cols=2,
                                                       name="slip_ratios")
        self.state = state
        self.nominal_forces = nominal_forces
        self.steers = steers
        self.slip_ratios = slip_ratios

        # Set the initial state.
        xinit = pack_state_vector(self.initial_state)
        for i, s in enumerate(xinit):
            self.prog.AddConstraint(state[0, i] == s)

        # Constrain xdot to always be at least some value to prevent numerical issues with optimizer.
        for i in range(self.T + 1):
            s = unpack_state_vector(state[i])
            self.prog.AddConstraint(s["xdot"] >= self.limits.min_xdot)

            # Constrain slip ratio to be at least a certain magnitude.
            if i != self.T:
                self.prog.AddConstraint(
                    slip_ratios[i,
                                0]**2.0 >= self.limits.min_slip_ratio_mag**2.0)
                self.prog.AddConstraint(
                    slip_ratios[i,
                                1]**2.0 >= self.limits.min_slip_ratio_mag**2.0)

        # Control rate limits.
        for i in range(self.T - 1):
            ddelta = self.dt * (steers[i + 1] - steers[i])
            f_dkappa = self.dt * (slip_ratios[i + 1, 0] - slip_ratios[i, 0])
            r_dkappa = self.dt * (slip_ratios[i + 1, 1] - slip_ratios[i, 1])
            self.prog.AddConstraint(ddelta <= self.limits.max_ddelta)
            self.prog.AddConstraint(ddelta >= -self.limits.max_ddelta)
            self.prog.AddConstraint(f_dkappa <= self.limits.max_dkappa)
            self.prog.AddConstraint(f_dkappa >= -self.limits.max_dkappa)
            self.prog.AddConstraint(r_dkappa <= self.limits.max_dkappa)
            self.prog.AddConstraint(r_dkappa >= -self.limits.max_dkappa)

        # Control value limits.
        for i in range(self.T):
            self.prog.AddConstraint(steers[i] <= self.limits.max_delta)
            self.prog.AddConstraint(steers[i] >= -self.limits.max_delta)

        # Add dynamics constraints by constraining residuals to be zero.
        for i in range(self.T):
            residuals = self.dynamics.nominal_dynamics(state[i], state[i + 1],
                                                       nominal_forces[i],
                                                       steers[i])
            for r in residuals:
                self.prog.AddConstraint(r == 0.0)

        # Add the cost function.
        self.add_cost(state)

        # Add a different force constraint depending on the configuration.
        if self.force_constraint == ForceConstraint.NO_PUDDLE:
            self.add_no_puddle_force_constraints(
                state, nominal_forces, steers,
                self.tire_model.get_deterministic_model(), slip_ratios)
        elif self.force_constraint == ForceConstraint.MEAN_CONSTRAINED:
            self.add_mean_constrained()
        elif self.force_constraint == ForceConstraint.CHANCE_CONSTRAINED:
            self.add_chance_constrained()
        else:
            raise NotImplementedError("ForceConstraint type invalid.")
        return

    def constant_input_initial_guess(self, state, steers, slip_ratios,
                                     nominal_forces):
        # Guess the slip ratio as the minimum allowed value.
        gslip_ratios = np.tile(
            np.array([
                self.limits.min_slip_ratio_mag, self.limits.min_slip_ratio_mag
            ]), (self.T, 1))

        # Guess the slip angle as a linearly ramping steer that then becomes constant.
        # This is done by creating an array of values corresponding to end_delta then
        # filling in the initial ramp up phase.
        gsteers = self.initial_guess_config["end_delta"] * np.ones(self.T)
        igc = self.initial_guess_config
        for i in range(igc["ramp_steps"]):
            gsteers[i] = (i / igc["ramp_steps"]) * (igc["end_delta"] -
                                                    igc["start_delta"])

        # Create empty arrays for state and forces.
        gstate = np.empty((self.T + 1, self.n_state))
        gstate[0] = pack_state_vector(self.initial_state)
        gforces = np.empty((self.T, 4))
        all_slips = self.T * [None]

        # Simulate the dynamics for the initial guess differently depending on the force
        # constraint being used.
        if self.force_constraint == ForceConstraint.NO_PUDDLE:
            tire_model = self.tire_model.get_deterministic_model()
            for i in range(self.T):
                s = unpack_state_vector(gstate[i])

                # Simulate the dynamics for one time step.
                gstate[i + 1], forces, slips = self.dynamics.simulate(
                    gstate[i], gsteers[i], gslip_ratios[i, 0],
                    gslip_ratios[i, 1], tire_model, self.dt)

                # Store the results.
                gforces[i] = pack_force_vector(forces)
                all_slips[i] = slips
        elif self.force_constraint == ForceConstraint.MEAN_CONSTRAINED or self.force_constraint == ForceConstraint.CHANCE_CONSTRAINED:
            # mean model is a function that maps (slip_ratio, slip_angle, x, y) -> (E[Fx], E[Fy])
            mean_model = self.tire_model.get_mean_model(
                self.puddle_model.get_mean_fun())

            for i in range(self.T):
                # Update the tire model based off the conditions of the world
                # at (x, y)
                s = unpack_state_vector(gstate[i])
                tire_model = lambda slip_ratio, slip_angle: mean_model(
                    slip_ratio, slip_angle, s["X"], s["Y"])

                # Simulate the dynamics for one time step.
                gstate[i + 1], forces, slips = self.dynamics.simulate(
                    gstate[i], gsteers[i], gslip_ratios[i, 0],
                    gslip_ratios[i, 1], tire_model, self.dt)

                # Store the results.
                gforces[i] = pack_force_vector(forces)
                all_slips[i] = slips
        else:
            raise NotImplementedError(
                "Invalid value for self.force_constraint")

        # Declare array for the initial guess and set the values.
        initial_guess = np.empty(self.prog.num_vars())
        self.prog.SetDecisionVariableValueInVector(state, gstate,
                                                   initial_guess)
        self.prog.SetDecisionVariableValueInVector(steers, gsteers,
                                                   initial_guess)
        self.prog.SetDecisionVariableValueInVector(slip_ratios, gslip_ratios,
                                                   initial_guess)
        self.prog.SetDecisionVariableValueInVector(nominal_forces, gforces,
                                                   initial_guess)

        if self.visualize_initial_guess:
            # TODO: reorganize visualizations
            psis = gstate[:, 2]
            xs = gstate[:, 4]
            ys = gstate[:, 5]
            fig, ax = plt.subplots()
            if self.force_constraint != ForceConstraint.NO_PUDDLE:
                plot_puddles(ax, self.puddle_model)
            plot_ellipse_arc(ax, self.ellipse_arc)
            plot_planned_trajectory(ax, xs, ys, psis, gsteers,
                                    self.physical_params)
            # Plot the slip ratios/angles
            plot_slips(all_slips)
            plot_forces(gforces)
            if self.force_constraint == ForceConstraint.NO_PUDDLE:
                generate_animation(xs,
                                   ys,
                                   psis,
                                   gsteers,
                                   self.physical_params,
                                   self.dt,
                                   puddle_model=None)
            else:
                generate_animation(xs,
                                   ys,
                                   psis,
                                   gsteers,
                                   self.physical_params,
                                   self.dt,
                                   puddle_model=self.puddle_model)
        return initial_guess

    def solve_program(self):
        # Generate initial guess
        initial_guess = self.constant_input_initial_guess(
            self.state, self.steers, self.slip_ratios, self.nominal_forces)

        # Solve the problem.
        solver = SnoptSolver()
        result = solver.Solve(self.prog, initial_guess)
        solver_details = result.get_solver_details()
        print("Exit flag: " + str(solver_details.info))

        self.visualize_results(result)

    def visualize_results(self, result):
        state_res = result.GetSolution(self.state)
        psis = state_res[:, 2]
        xs = state_res[:, 4]
        ys = state_res[:, 5]
        steers = result.GetSolution(self.steers)

        fig, ax = plt.subplots()
        plot_ellipse_arc(ax, self.ellipse_arc)
        plot_puddles(ax, self.puddle_model)
        plot_planned_trajectory(ax, xs, ys, psis, steers, self.physical_params)
        generate_animation(xs,
                           ys,
                           psis,
                           steers,
                           self.physical_params,
                           self.dt,
                           puddle_model=self.puddle_model)
        plt.show()

    def add_cost(self, state):
        # Add the final state cost function.
        diff_state = pack_state_vector(self.goal_state) - state[-1]
        self.prog.AddQuadraticCost(diff_state.T @ self.Qf @ diff_state)

        # Get the approx distance function for the ellipse.
        fun = self.ellipse_arc.approx_dist_fun()
        for i in range(self.T):
            s = unpack_state_vector(state[i])
            self.prog.AddCost(self.deviation_cost * fun(s["X"], s["Y"]))

    def add_mean_constrained(self):
        # mean model is a function that maps (slip_ratio, slip_angle, x, y) -> (E[Fx], E[Fy])
        mean_model = self.tire_model.get_mean_model(
            self.puddle_model.get_mean_fun())

        for i in range(self.T):
            # Get slip angles and slip ratios.
            s = unpack_state_vector(self.state[i])
            F = unpack_force_vector(self.nominal_forces[i])
            # get the tire model at this position in space.
            tire_model = lambda slip_ratio, slip_angle: mean_model(
                slip_ratio, slip_angle, s["X"], s["Y"])

            # Unpack values
            delta = self.steers[i]
            alpha_f, alpha_r = self.dynamics.slip_angles(
                s["xdot"], s["ydot"], s["psidot"], delta)
            kappa_f = self.slip_ratios[i, 0]
            kappa_r = self.slip_ratios[i, 1]

            # Compute expected forces.
            E_Ffx, E_Ffy = tire_model(kappa_f, alpha_f)
            E_Frx, E_Fry = tire_model(kappa_r, alpha_r)

            # Constrain these expected force values to be equal to the nominal
            # forces in the optimization.
            self.prog.AddConstraint(E_Ffx - F["f_long"] == 0.0)
            self.prog.AddConstraint(E_Ffy - F["f_lat"] == 0.0)
            self.prog.AddConstraint(E_Frx - F["r_long"] == 0.0)
            self.prog.AddConstraint(E_Fry - F["r_lat"] == 0.0)

    def add_chance_constrained(self):
        pass

    def add_no_puddle_force_constraints(self, state, forces, steers,
                                        pacejka_model, slip_ratios):
        """
        Args:
            prog:
            state:
            forces:
            steers:
            pacejka_model: function with signature (slip_ratio, slip_angle) using pydrake.math
        """
        for i in range(self.T):
            # Get slip angles and slip ratios.
            s = unpack_state_vector(state[i])
            F = unpack_force_vector(forces[i])
            delta = steers[i]
            alpha_f, alpha_r = self.dynamics.slip_angles(
                s["xdot"], s["ydot"], s["psidot"], delta)
            kappa_f = slip_ratios[i, 0]
            kappa_r = slip_ratios[i, 1]
            Ffx, Ffy = pacejka_model(kappa_f, alpha_f)
            Frx, Fry = pacejka_model(kappa_r, alpha_r)

            # Constrain the values from the pacejka model to be equal
            # to the desired values in the optimization.
            self.prog.AddConstraint(Ffx - F["f_long"] == 0.0)
            self.prog.AddConstraint(Ffy - F["f_lat"] == 0.0)
            self.prog.AddConstraint(Frx - F["r_long"] == 0.0)
            self.prog.AddConstraint(Fry - F["r_lat"] == 0.0)
Code Example #10
"""

# numeric parameters
time_interval = .5 # in years
time_steps = 100

# initialize optimization
prog = MathematicalProgram()

# optimization variables
state = prog.NewContinuousVariables(time_steps + 1, 4, 'state')
thrust = prog.NewContinuousVariables(time_steps, 2, 'thrust')

# initial orbit constraints
for residual in universe.constraint_state_to_orbit(state[0], 'Earth'):
    prog.AddConstraint(residual == 0)

# terminal orbit constraints
for residual in universe.constraint_state_to_orbit(state[-1], 'Mars'):
    prog.AddConstraint(residual == 0)
    
# discretized dynamics
for t in range(time_steps):
    residuals = universe.rocket_discrete_dynamics(state[t], state[t+1], thrust[t], time_interval)
    for residual in residuals:
        prog.AddConstraint(residual == 0)
    
# initial guess
state_guess = interpolate_rocket_state(
    universe.get_planet('Earth').position,
    universe.get_planet('Mars').position,
Code Example #11
def optimal_trajectory(joints,
                       start_position,
                       end_position,
                       signed_dist_fn,
                       nc,
                       time=10,
                       knot_points=100):
    assert len(joints) == len(start_position)
    assert len(joints) == len(end_position)

    h = time / (knot_points - 1)
    nq = len(joints)
    prog = MathematicalProgram()
    q_var = []
    v_var = []
    for ii in range(knot_points):
        q_var.append(prog.NewContinuousVariables(nq, "q[" + str(ii) + "]"))
        v_var.append(prog.NewContinuousVariables(nq, "v[" + str(ii) + "]"))

    # ---------------------------------------------------------------
    # Initial & Final Pose Constraints ------------------------------
    x_i = np.append(start_position, np.zeros(nq))
    x_i_vars = np.append(q_var[0], v_var[0])
    prog.AddLinearEqualityConstraint(np.eye(2 * nq), x_i, x_i_vars)
    tol = 0.01 * np.ones(2 * nq)
    x_f = np.append(end_position, np.zeros(nq))
    x_f_vars = np.append(q_var[-1], v_var[-1])
    prog.AddBoundingBoxConstraint(x_f - tol, x_f + tol, x_f_vars)

    # ---------------------------------------------------------------
    # Dynamics Constraints ------------------------------------------
    for ii in range(knot_points - 1):
        # Forward-Euler position update: q[ii + 1] = q[ii] + h * v[ii].
        dyn_con1 = np.hstack((np.eye(nq), h * np.eye(nq), -np.eye(nq)))
        dyn_var1 = np.concatenate((q_var[ii], v_var[ii], q_var[ii + 1]))
        prog.AddLinearEqualityConstraint(dyn_con1, np.zeros(nq), dyn_var1)

    # ---------------------------------------------------------------
    # Joint Limit Constraints ---------------------------------------
    q_min = np.array([j.lower_limits() for j in joints])
    q_max = np.array([j.upper_limits() for j in joints])
    for ii in range(knot_points):
        prog.AddBoundingBoxConstraint(q_min, q_max, q_var[ii])

    # ---------------------------------------------------------------
    # Collision Constraints -----------------------------------------
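    # signed_dist_fn(q) must stay within [0, 1e8]: non-negative clearance at every knot point.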
    for ii in range(knot_points):
        prog.AddConstraint(signed_dist_fn, np.zeros(nc), 1e8 * np.ones(nc),
                           q_var[ii])

    # ---------------------------------------------------------------
    # Velocity Cost -------------------------------------------------
    for ii in range(knot_points):
        prog.AddQuadraticErrorCost(np.eye(nq), np.zeros(nq), v_var[ii])

    xi = np.array(start_position)
    xf = np.array(end_position)
    for ii in range(knot_points):
        prog.SetInitialGuess(q_var[ii],
                             ii * (xf - xi) / (knot_points - 1) + xi)
        prog.SetInitialGuess(v_var[ii], np.zeros(nq))

    result = Solve(prog)
    print(result.get_solver_id().name())
    if not result.is_success():
        print(result.get_solution_result())
        return None
    q_path = []
    # print(signed_dist_fn(result.GetSolution(q_var[0])))
    for ii in range(knot_points):
        q_path.append(result.GetSolution(q_var[ii]))
    return q_path
Code Example #12
def constraint_evaluator1(z):
    # Velocity-update residuals for the planar quadrotor dynamics (cf. the commented-out
    # constraints in the loop below); z = [x_n (3), dx_n (3), u_n (2), dx_{n+1} (3)].
    return np.array([(-(z[6] + z[7])*np.sin(z[2])/m)*dt + z[3] - z[8],
                     ((z[6] + z[7])*np.cos(z[2])/m - g)*dt + z[4] - z[9],
                     ((z[6] - z[7])*r/I)*dt + z[5] - z[10]])

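# Position-update residuals: x_{n+1} = x_n + dt * dx_n, with z = [x_n (3), dx_n (3), x_{n+1} (3)].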
def constraint_evaluator2(z):
    return np.array([z[3]*dt + z[0] - z[6], z[4]*dt + z[1] - z[7], z[5]*dt + z[2] - z[8]])

for n in range(N-1):
    # Will eventually be prog.AddConstraint(x[:,n+1] == A@x[:,n] + B@u[:,n])
    # See drake issues 12841 and 8315
    # prog.AddConstraint(eq(dx[0,n+1], dx[0,n] + dt*(-(u[0,n] + u[1,n])*sin(x[2,n])/m)))
    # prog.AddConstraint(eq(dx[1,n+1], dx[1,n] + dt*((u[0,n] + u[1,n])*cos(x[2,n]) - m*g/m)))
    # prog.AddConstraint(eq(dx[2,n+1], dx[2,n] + dt*((u[0,n] - u[1,n])*r/I)))
    prog.AddQuadraticCost(sum(x[:,n]**2) + sum(dx[:,n]**2) + sum(u[:,n]**2))
    prog.AddConstraint(constraint_evaluator1,
                        lb=np.array([0]*3),
                        ub=np.array([0]*3),
                        vars=[x[0, n], x[1, n], x[2, n], 
                              dx[0, n], dx[1, n], dx[2, n],
                              u[0, n], u[1, n],
                              dx[0, n+1], dx[1, n+1], dx[2, n+1],])

    prog.AddConstraint(constraint_evaluator2,
                        lb=np.array([0]*3),
                        ub=np.array([0]*3),
                        vars=[x[0, n], x[1, n], x[2, n], 
                              dx[0, n], dx[1, n], dx[2, n],
                              x[0, n+1], x[1, n+1], x[2, n+1],])

    # prog.AddConstraint(eq(x[1,n+1], x[1,n] + dt*dx[1,n]))
    # prog.AddConstraint(eq(x[2,n+1], x[2,n] + dt*dx[2,n]))

    prog.AddBoundingBoxConstraint([-f*m*g]*2, [f*m*g]*2, u[:,n])
Code Example #13
    def compute_control(self, x_p1, x_p2, x_puck, p_goal, obstacles):
        """Solve for initial velocity for the puck to bounce the wall once and hit the goal."""
        """Currently does not work"""
        # initialize program
        N = self.mpc_params.N  # length of receding horizon
        prog = MathematicalProgram()

        # State and input variables
        x1 = prog.NewContinuousVariables(N + 1, 4,
                                         name='p1_state')  # state of player 1
        u1 = prog.NewContinuousVariables(
            N, 2, name='p1_input')  # inputs for player 1
        xp = prog.NewContinuousVariables(
            N + 1, 4, name='puck_state')  # state of the puck

        # Slack variables
        t1_kick = prog.NewContinuousVariables(
            N + 1, name='kick_time'
        )  # slack variables that captures if player 1 is kicking or not at the given time step
        # Defined as continuous, but treated as mixed-integer: 1 when kicking
        v1_kick = prog.NewContinuousVariables(
            N + 1, 2, name='v1_kick')  # velocity of player after kick to puck
        vp_kick = prog.NewContinuousVariables(
            N + 1, 2, name='vp_kick'
        )  # velocity of puck after being kicked by the player
        dist = prog.NewContinuousVariables(
            N + 1, name='dist_puck_player')  # distance between puck and player
        cost = prog.NewContinuousVariables(
            N + 1, name='cost')  # slack variable to monitor cost

        # Slack variable: squared center distance between player and puck minus the
        # squared contact distance (r1 + rp)**2.
        for k in range(N + 1):
            r1 = self.sim_params.player_radius
            rp = self.sim_params.puck_radius
            prog.AddConstraint(
                dist[k] == (x1[k, 0:2] - xp[k, 0:2]).dot(x1[k, 0:2] -
                                                         xp[k, 0:2]) -
                (r1 + rp)**2)

        #### COST and final states
        # TODO: find adequate final velocity
        x_puck_des = np.concatenate(
            (p_goal, np.zeros(2)), axis=0)  # desired position and vel for puck
        for k in range(N + 1):
            Q_dist_puck_goal = 10 * np.eye(2)
            q_dist_puck_player = 0.1
            e1 = x_puck_des[0:2] - xp[k, 0:2]
            e2 = xp[k, 0:2] - x1[k, 0:2]
            prog.AddConstraint(
                cost[k] == e1.dot(np.matmul(Q_dist_puck_goal, e1)) +
                q_dist_puck_player * dist[k])
            prog.AddCost(cost[k])
            #prog.AddQuadraticErrorCost(Q=self.mpc_params.Q_puck, x_desired=x_puck_des, vars=xp[k])  # puck in the goal
            #prog.AddQuadraticErrorCost(Q=np.eye(2), x_desired=x_puck_des[0:2], vars=x1[k, 0:2])
            #prog.AddQuadraticErrorCost(Q=10*np.eye(2), x_desired=np.array([2, 2]), vars=x1[k, 0:2]) # TEST: control position of the player instead of puck
        #for k in range(N):
        #    prog.AddQuadraticCost(1e-2*u1[k].dot(u1[k]))                 # be wise on control effort

        # Initial states for puck and player
        prog.AddBoundingBoxConstraint(x_p1, x_p1,
                                      x1[0])  # Intial state for player 1
        prog.AddBoundingBoxConstraint(x_puck, x_puck,
                                      xp[0])  # Initial state for puck

        # Compute elastic collision for every possible timestep.
        for k in range(N + 1):
            v_puck_bfr = xp[k, 2:4]
            v_player_bfr = x1[k, 2:4]
            v_puck_aft, v_player_aft = self.get_reset_velocities(
                v_puck_bfr, v_player_bfr)
            prog.AddConstraint(eq(vp_kick[k], v_puck_aft))
            prog.AddConstraint(eq(v1_kick[k], v_player_aft))

        # Use slack variable to activate guard condition based on distance.
        M = 15**2
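        # Big-M logic: t1_kick[k] = 1 forces dist[k] <= 0 (contact), while
        # t1_kick[k] = 0 forces dist[k] >= 0 (no penetration).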
        for k in range(N + 1):
            prog.AddLinearConstraint(dist[k] <= M * (1 - t1_kick[k]))
            prog.AddLinearConstraint(dist[k] >= -t1_kick[k] * M)

        # Hybrid dynamics for player
        #for k in range(N):
        #    A = self.mpc_params.A_player
        #    B = self.mpc_params.B_player
        #    x1_kick = np.concatenate((x1[k][0:2], v1_kick[k]), axis=0) # The state of the player after it kicks the puck
        #    x1_next = np.matmul(A, (1 - t1_kick[k])*x1[k] + t1_kick[k]*x1_kick) + np.matmul(B, u1[k])
        #    prog.AddConstraint(eq(x1[k+1], x1_next))

        # Assuming player dynamics are not affected by collision
        for k in range(N):
            A = self.mpc_params.A_player
            B = self.mpc_params.B_player
            x1_next = np.matmul(A, x1[k]) + np.matmul(B, u1[k])
            prog.AddConstraint(eq(x1[k + 1], x1_next))

        # Hybrid dynamics for puck_mass
        for k in range(N):
            A = self.mpc_params.A_puck
            xp_kick = np.concatenate(
                (xp[k][0:2], vp_kick[k]),
                axis=0)  # State of the puck after it gets kicked
            xp_next = np.matmul(A, (1 - t1_kick[k]) * xp[k] +
                                t1_kick[k] * xp_kick)
            prog.AddConstraint(eq(xp[k + 1], xp_next))

        # Generate trajectory that is not in direct collision with the puck
        for k in range(N + 1):
            eps = 0.1
            prog.AddConstraint(dist[k] >= -eps)

        # Input and arena constraint
        self.add_input_limits(prog, u1, N)
        self.add_arena_limits(prog, x1, N)
        self.add_arena_limits(prog, xp, N)

        # fake mixed-integer constraint
        #for k in range(N+1):
        #    prog.AddConstraint(t1_kick[k]*(1-t1_kick[k])==0)

        # Hot-start
        guess_u1, guess_x1 = self.get_initial_guess(x_p1, p_goal, x_puck[0:2])
        prog.SetInitialGuess(x1, guess_x1)
        prog.SetInitialGuess(u1, guess_u1)
        if self.prev_xp is not None:
            prog.SetInitialGuess(xp, self.prev_xp)
            #prog.SetInitialGuess(t1_kick, np.ones_like(t1_kick))

        # solve for the periods
        # solver = SnoptSolver()
        #result = solver.Solve(prog)

        #if not result.is_success():
        #    print("Unable to find solution.")

        # save for hot-start
        #self.prev_xp = result.GetSolution(xp)

        #if True:
        #    print("x1:{}".format(result.GetSolution(x1)))
        #    print("u1: {}".format( result.GetSolution(u1)))
        #    print("xp: {}".format( result.GetSolution(xp)))
        #   print('dist{}'.format(result.GetSolution(dist)))
        #    print("t1_kick:{}".format(result.GetSolution(t1_kick)))
        #    print("v1_kick:{}".format(result.GetSolution(v1_kick)))
        #   print("vp_kick:{}".format(result.GetSolution(vp_kick)))
        #    print("cost:{}".format(result.GetSolution(cost)))

        # return whether successful, and the initial player velocity
        #u1_opt = result.GetSolution(u1)
        return True, guess_u1[0, :], np.zeros((2))
Code Example #14
class FourierCollocationProblem:
    def __init__(self, system_dynamics, constraints):
        start_time = time.time()

        self.prog = MathematicalProgram()
        self.N = 50  # Number of collocation points
        self.M = 10  # Number of frequencies
        self.system_dynamics = system_dynamics

        self.psi = np.pi * (0.7)  # TODO change

        (
            min_height,
            max_height,
            min_vel,
            max_vel,
            h0,
            min_travelled_distance,
            t_f_min,
            t_f_max,
            avg_vel_min,
            avg_vel_max,
        ) = constraints

        initial_pos = np.array([0, 0, h0])

        # Add state trajectory parameters as decision variables
        self.coeffs = self.prog.NewContinuousVariables(
            3, self.M + 1, "c"
        )  # (x,y,z) for every frequency
        self.phase_delays = self.prog.NewContinuousVariables(3, self.M, "eta")
        self.t_f = self.prog.NewContinuousVariables(1, 1, "t_f")[0, 0]
        self.avg_vel = self.prog.NewContinuousVariables(1, 1, "V_bar")[0, 0]

        # Enforce initial conditions
        residuals = self.get_pos_fourier(collocation_time=0) - initial_pos
        for residual in residuals:
            self.prog.AddConstraint(residual == 0)

        # Enforce dynamics at collocation points
        for n in range(self.N):
            collocation_time = (n / self.N) * self.t_f
            pos = self.get_pos_fourier(collocation_time)
            vel = self.get_vel_fourier(collocation_time)
            vel_dot = self.get_vel_dot_fourier(collocation_time)

            residuals = self.continuous_dynamics(pos, vel, vel_dot)
            for residual in residuals[3:6]:  # TODO only these last three are residuals
                self.prog.AddConstraint(residual == 0)

            if True:
                # Add velocity constraints
                squared_vel_norm = vel.T.dot(vel)
                self.prog.AddConstraint(min_vel ** 2 <= squared_vel_norm)
                self.prog.AddConstraint(squared_vel_norm <= max_vel ** 2)

                # Add height constraints
                self.prog.AddConstraint(pos[2] <= max_height)
                self.prog.AddConstraint(min_height <= pos[2])

        # Add constraint on min travelled distance
        self.prog.AddConstraint(min_travelled_distance <= self.avg_vel * self.t_f)

        # Add constraints on coefficients
        if False:
            for coeff in self.coeffs.T:
                lb = np.array([-500, -500, -500])
                ub = np.array([500, 500, 500])
                self.prog.AddBoundingBoxConstraint(lb, ub, coeff)

        # Add constraints on phase delays
        if False:
            for etas in self.phase_delays.T:
                lb = np.array([0, 0, 0])
                ub = np.array([2 * np.pi, 2 * np.pi, 2 * np.pi])
                self.prog.AddBoundingBoxConstraint(lb, ub, etas)

        # Add constraints on final time and avg vel
        self.prog.AddBoundingBoxConstraint(t_f_min, t_f_max, self.t_f)
        self.prog.AddBoundingBoxConstraint(avg_vel_min, avg_vel_max, self.avg_vel)

        # Add objective function
        self.prog.AddCost(-self.avg_vel)

        # Set initial guess
        coeffs_guess = np.zeros((3, self.M + 1))
        coeffs_guess += np.random.rand(*coeffs_guess.shape) * 100

        self.prog.SetInitialGuess(self.coeffs, coeffs_guess)

        phase_delays_guess = np.zeros((3, self.M))
        phase_delays_guess += np.random.rand(*phase_delays_guess.shape) * 1e-1

        self.prog.SetInitialGuess(self.phase_delays, phase_delays_guess)
        self.prog.SetInitialGuess(self.avg_vel, (avg_vel_max - avg_vel_min) / 2)
        self.prog.SetInitialGuess(self.t_f, (t_f_max - t_f_min) / 2)

        print(
            "Finished formulating the optimization problem in: {0} s".format(
                time.time() - start_time
            )
        )

        start_solve_time = time.time()
        self.result = Solve(self.prog)
        print("Found solution: {0}".format(self.result.is_success()))
        print("Found a solution in: {0} s".format(time.time() - start_solve_time))

        # TODO input costs
        # assert self.result.is_success()

        return

    def get_solution(self):
        coeffs_opt = self.result.GetSolution(self.coeffs)
        phase_delays_opt = self.result.GetSolution(self.phase_delays)
        t_f_opt = self.result.GetSolution(self.t_f)
        avg_vel_opt = self.result.GetSolution(self.avg_vel)

        sim_N = 100
        dt = t_f_opt / sim_N
        times = np.arange(0, t_f_opt, dt)
        pos_traj = np.zeros((3, sim_N))
        # TODO remove vel traj
        vel_traj = np.zeros((3, sim_N))
        for i in range(sim_N):
            t = times[i]
            pos = self.evaluate_pos_traj(
                coeffs_opt, phase_delays_opt, t_f_opt, avg_vel_opt, t
            )
            pos_traj[:, i] = pos
            vel = self.evaluate_vel_traj(
                coeffs_opt, phase_delays_opt, t_f_opt, avg_vel_opt, t
            )
            vel_traj[:, i] = vel

        # TODO move plotting out
        plot_trj_3_wind(pos_traj.T, np.array([np.sin(self.psi), np.cos(self.psi), 0]))

        plt.show()
        breakpoint()

        return

    def evaluate_pos_traj(self, coeffs, phase_delays, t_f, avg_vel, t):
        pos_traj = np.copy(coeffs[:, 0])
        for m in range(1, self.M):
            sine_terms = np.array(
                [
                    np.sin((2 * np.pi * m * t) / t_f + phase_delays[0, m]),
                    np.sin((2 * np.pi * m * t) / t_f + phase_delays[1, m]),
                    np.sin((2 * np.pi * m * t) / t_f + phase_delays[2, m]),
                ]
            )
            pos_traj += coeffs[:, m] * sine_terms

        direction_term = np.array(
            [
                avg_vel * np.sin(self.psi) * t,
                avg_vel * np.cos(self.psi) * t,
                0,
            ]
        )
        pos_traj += direction_term
        return pos_traj

    def evaluate_vel_traj(self, coeffs, phase_delays, t_f, avg_vel, t):
        vel_traj = np.array([0, 0, 0], dtype=object)
        for m in range(1, self.M):
            cos_terms = np.array(
                [
                    (2 * np.pi * m)
                    / t_f
                    * np.cos((2 * np.pi * m * t) / t_f + phase_delays[0, m]),
                    (2 * np.pi * m)
                    / t_f
                    * np.cos((2 * np.pi * m * t) / t_f + phase_delays[1, m]),
                    (2 * np.pi * m)
                    / t_f
                    * np.cos((2 * np.pi * m * t) / t_f + phase_delays[2, m]),
                ]
            )
            vel_traj += coeffs[:, m] * cos_terms

        direction_term = np.array(
            [
                avg_vel * np.sin(self.psi),
                avg_vel * np.cos(self.psi),
                0,
            ]
        )
        vel_traj += direction_term
        return vel_traj

    def get_pos_fourier(self, collocation_time):
        pos_traj = np.copy(self.coeffs[:, 0])
        for m in range(1, self.M):
            a = (2 * np.pi * m) / self.t_f
            sine_terms = np.array(
                [
                    np.sin(a * collocation_time + self.phase_delays[0, m]),
                    np.sin(a * collocation_time + self.phase_delays[1, m]),
                    np.sin(a * collocation_time + self.phase_delays[2, m]),
                ]
            )
            pos_traj += self.coeffs[:, m] * sine_terms

        direction_term = np.array(
            [
                self.avg_vel * np.sin(self.psi) * collocation_time,
                self.avg_vel * np.cos(self.psi) * collocation_time,
                0,
            ]
        )
        pos_traj += direction_term

        return pos_traj

    def get_vel_fourier(self, collocation_time):
        vel_traj = np.array([0, 0, 0], dtype=object)
        for m in range(1, self.M):
            a = (2 * np.pi * m) / self.t_f
            cos_terms = np.array(
                [
                    a * np.cos(a * collocation_time + self.phase_delays[0, m]),
                    a * np.cos(a * collocation_time + self.phase_delays[1, m]),
                    a * np.cos(a * collocation_time + self.phase_delays[2, m]),
                ]
            )
            vel_traj += self.coeffs[:, m] * cos_terms

        direction_term = np.array(
            [
                self.avg_vel * np.sin(self.psi),
                self.avg_vel * np.cos(self.psi),
                0,
            ]
        )
        vel_traj += direction_term
        return vel_traj

    def get_vel_dot_fourier(self, collocation_time):
        vel_dot_traj = np.array([0, 0, 0], dtype=object)
        for m in range(1, self.M):
            a = (2 * np.pi * m) / self.t_f
            sine_terms = np.array(
                [
                    -(a ** 2) * np.sin(a * collocation_time + self.phase_delays[0, m]),
                    -(a ** 2) * np.sin(a * collocation_time + self.phase_delays[1, m]),
                    -(a ** 2) * np.sin(a * collocation_time + self.phase_delays[2, m]),
                ]
            )
            vel_dot_traj += self.coeffs[:, m] * sine_terms
        return vel_dot_traj

    def continuous_dynamics(self, pos, vel, vel_dot):
        x = np.concatenate((pos, vel))
        x_dot = np.concatenate((vel, vel_dot))

        # TODO need to somehow implement input to make this work

        return x_dot - self.system_dynamics(x, u)
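
The class above treats get_vel_fourier as the analytic time derivative of get_pos_fourier. A standalone NumPy sanity check of that relationship, re-implemented with made-up coefficients and the same index convention (a sketch, not code from the class):

import numpy as np

M, t_f, avg_vel, psi = 4, 10.0, 2.0, 0.7 * np.pi
rng = np.random.default_rng(0)
c = rng.random((3, M + 1))
eta = rng.random((3, M))
direction = np.array([np.sin(psi), np.cos(psi), 0.0])

def pos(t):
    p = c[:, 0].copy()
    for m in range(1, M):  # same index range as the class above
        p += c[:, m] * np.sin(2 * np.pi * m * t / t_f + eta[:, m])
    return p + avg_vel * t * direction

def vel(t):
    v = np.zeros(3)
    for m in range(1, M):
        v += c[:, m] * (2 * np.pi * m / t_f) * np.cos(2 * np.pi * m * t / t_f + eta[:, m])
    return v + avg_vel * direction

# The central finite difference of pos should match vel.
t, h = 1.234, 1e-6
assert np.allclose((pos(t + h) - pos(t - h)) / (2 * h), vel(t), atol=1e-5)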
コード例 #15
0
import numpy as np
from pydrake.all import Quaternion
from pydrake.all import MathematicalProgram, Solve, eq, le, ge, SolverOptions, SnoptSolver
import pdb

prog = MathematicalProgram()
x = prog.NewContinuousVariables(rows=1, name='x')
y = prog.NewContinuousVariables(rows=1, name='y')
slack = prog.NewContinuousVariables(rows=1, name="slack")

prog.AddConstraint(eq(x * y, slack))
prog.AddLinearConstraint(ge(x, 0))
prog.AddLinearConstraint(ge(y, 0))
prog.AddLinearConstraint(le(x, 1))
prog.AddLinearConstraint(le(y, 1))
prog.AddCost(1e5 * (slack**2)[0])
prog.AddCost(-(2 * x[0] + y[0]))

solver = SnoptSolver()
result = solver.Solve(prog)
print(
    f"Success: {result.is_success()}, x = {result.GetSolution(x)}, y = {result.GetSolution(y)}, slack = {result.GetSolution(slack)}"
)
コード例 #16
0
def formulate_optimization(environment_name, obstacles):
    '''
    Generate and return an optimization problem. All decision variables,
    constraints, and costs are added here.
    '''
    def discrete_dynamics(state, state_next, thrust):
        '''Forward Euler method to approximate the discretized dynamics'''
        state_dot = generate_dynamics(state, thrust, env_name=environment_name)
        residuals = state_next - state - time_interval * state_dot
        return residuals

    def squared_distance_to_obstacle(boat_pos, obs_pos):
        '''Compute the squared distance from the boat to an obstacle'''
        vec = boat_pos - obs_pos
        return vec.dot(vec)

    # initialize optimization
    prog = MathematicalProgram()
    opt_state = prog.NewContinuousVariables(duration + 1, 6, 'optimal_state')
    opt_thrust = prog.NewContinuousVariables(duration, 2, 'optimal_thrust')

    if opt_switches[0]:
        # initial state constraint
        for i in range(len(start)):
            prog.AddConstraint(opt_state[0, i] - start[i], 0.,
                               0.).evaluator().set_description("Initial State")

    if opt_switches[1]:
        # goal state constraint
        for i in range(len(start)):
            prog.AddConstraint(
                opt_state[-1, i] - target[i], -final_tolerances[i],
                final_tolerances[i]).evaluator().set_description(
                    f"Final State {i}")

    if opt_switches[2]:
        # enforce dynamics
        for t in range(duration):
            residuals = discrete_dynamics(opt_state[t], opt_state[t + 1],
                                          opt_thrust[t])
            for i, residual in enumerate(residuals):
                prog.AddConstraint(residual, -dynamics_tolerances[i],
                                   dynamics_tolerances[i])

    if opt_switches[3]:
        # thrust limits
        for t in range(duration):
            prog.AddConstraint(opt_thrust[t][0], -thrust_lim[0], thrust_lim[0])
            prog.AddConstraint(opt_thrust[t][1], -thrust_lim[1], thrust_lim[1])

    if opt_switches[4]:
        # avoid collisions
        for i, obstacle in enumerate(obstacles):
            for t in range(duration + 1):
                d2 = squared_distance_to_obstacle(opt_state[t, 0:2],
                                                  obstacle.traj[t])
                prog.AddConstraint(d2 >= obstacle.radius**2 + (w / 2)**2)

    if opt_switches[5]:
        # fuel cost
        for t in range(duration):
            prog.AddCost(time_interval * opt_thrust[t, 0]**2)

    if opt_switches[6]:
        # distance cost
        for t in range(duration):
            prog.AddCost(time_interval *
                         (opt_state[t, 3]**2 + opt_state[t, 4]**2))

    return prog, opt_state, opt_thrust
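
formulate_optimization only builds the program; solving it and extracting the trajectories is left to the caller. A hypothetical call site (the environment name and my_obstacles below are placeholders, not names from this project):

from pydrake.all import Solve

# Hypothetical usage; "my_env" and my_obstacles stand in for real inputs.
prog, opt_state, opt_thrust = formulate_optimization("my_env", my_obstacles)
result = Solve(prog)
print("solved:", result.is_success())
state_traj = result.GetSolution(opt_state)    # shape (duration + 1, 6)
thrust_traj = result.GetSolution(opt_thrust)  # shape (duration, 2)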
コード例 #17
0
    def compute_trajectory_to_other_world(self, state_initial, minimum_time,
                                          maximum_time):
        '''
        Your mission is to implement this function.

        A successful implementation of this function will compute a dynamically feasible trajectory
        which satisfies these criteria:
            - Efficiently conserve fuel
            - Reach "orbit" of the far right world
            - Approximately obey the dynamic constraints
            - Begin at the state_initial provided
            - Take no more than maximum_time, no less than minimum_time

        The above are defined more precisely in the provided notebook.

        Please note there are two return args.

        :param state_initial: numpy array of length 4, see rocket_dynamics for documentation
        :param minimum_time: float, minimum time allowed for trajectory
        :param maximum_time: float, maximum time allowed for trajectory

        :return: three return args separated by commas:

            trajectory, input_trajectory, time_array

            trajectory: a 2d array with N rows, and 4 columns. See simulate_states_over_time for more documentation.
            input_trajectory: a 2d array with N-1 row, and 2 columns. See simulate_states_over_time for more documentation.
            time_array: an array with N rows. 

        '''

        mp = MathematicalProgram()

        max_speed = 0.99
        desired_distance = 0.5
        u_cost_factor = 1000.
        N = 50
        #         trajectory = np.zeros((N+1,4))
        #         input_trajectory = np.ones((N,2))*10.0
        time_used = (maximum_time + minimum_time) / 2.
        time_step = time_used / (N + 1)
        time_array = np.arange(0.0, time_used, time_step)

        k = 0
        # Create continuous variables for u & x
        u = mp.NewContinuousVariables(2, "u_%d" % k)
        x = mp.NewContinuousVariables(4, "x_%d" % k)
        u_over_time = u
        x_over_time = x

        for k in range(1, N):
            u = mp.NewContinuousVariables(2, "u_%d" % k)
            x = mp.NewContinuousVariables(4, "x_%d" % k)
            u_over_time = np.vstack((u_over_time, u))
            x_over_time = np.vstack((x_over_time, x))

        # trajectory is N+1 in size
        x = mp.NewContinuousVariables(4, "x_%d" % (N + 1))
        x_over_time = np.vstack((x_over_time, x))

        # Add cost
        #         for k in range(0, N):
        mp.AddQuadraticCost(u_cost_factor *
                            u_over_time[:, 0].dot(u_over_time[:, 0]))
        mp.AddQuadraticCost(u_cost_factor *
                            u_over_time[:, 1].dot(u_over_time[:, 1]))

        # Add constraint for initial state
        mp.AddLinearConstraint(x_over_time[0, 0] >= state_initial[0])
        mp.AddLinearConstraint(x_over_time[0, 0] <= state_initial[0])
        mp.AddLinearConstraint(x_over_time[0, 1] >= state_initial[1])
        mp.AddLinearConstraint(x_over_time[0, 1] <= state_initial[1])
        mp.AddLinearConstraint(x_over_time[0, 2] >= state_initial[2])
        mp.AddLinearConstraint(x_over_time[0, 2] <= state_initial[2])
        mp.AddLinearConstraint(x_over_time[0, 3] >= state_initial[3])
        mp.AddLinearConstraint(x_over_time[0, 3] <= state_initial[3])

        # Add constraint between x & u
        for k in range(1, N + 1):
            next_step = self.rocket_dynamics(x_over_time[k - 1, :],
                                             u_over_time[k - 1, :])
            mp.AddConstraint(x_over_time[k, 0] == x_over_time[k - 1, 0] +
                             time_step * next_step[0])
            mp.AddConstraint(x_over_time[k, 1] == x_over_time[k - 1, 1] +
                             time_step * next_step[1])
            mp.AddConstraint(x_over_time[k, 2] == x_over_time[k - 1, 2] +
                             time_step * next_step[2])
            mp.AddConstraint(x_over_time[k, 3] == x_over_time[k - 1, 3] +
                             time_step * next_step[3])

        # Make sure it never goes too far from the planets
#         for k in range(1, N):
#             mp.AddConstraint(self.two_norm(x_over_time[k,0:2] - self.world_2_position[:]) <= 10)
#             mp.AddConstraint(self.two_norm(x_over_time[k,0:2] - self.world_1_position[:]) <= 10)

        # Make sure u never goes above a threshold
        max_u = 6.
        for k in range(0, N):
            mp.AddLinearConstraint(u_over_time[k, 0] <= max_u)
            mp.AddLinearConstraint(-u_over_time[k, 0] <= max_u)
            mp.AddLinearConstraint(u_over_time[k, 1] <= max_u)
            mp.AddLinearConstraint(-u_over_time[k, 1] <= max_u)

        # Make sure it reaches world 2
        mp.AddConstraint(
            self.two_norm(x_over_time[-1, 0:2] -
                          self.world_2_position) <= desired_distance)
        mp.AddConstraint(
            self.two_norm(x_over_time[-1, 0:2] -
                          self.world_2_position) >= desired_distance)

        # Make sure its speed isn't too high
        mp.AddConstraint(self.two_norm(x_over_time[-1, 2:4]) <= max_speed**2.)

        # Get result
        result = Solve(mp)
        x_over_time_result = result.GetSolution(x_over_time)
        u_over_time_result = result.GetSolution(u_over_time)
        print("Success", result.is_success())
        print("Final position", x_over_time_result[-1, :])
        print(
            "Final distance to world2",
            self.two_norm(x_over_time_result[-1, 0:2] - self.world_2_position))
        print("Final speed", self.two_norm(x_over_time_result[-1, 2:4]))
        print("Fuel consumption", (u_over_time_result**2.).sum())

        trajectory = x_over_time_result
        input_trajectory = u_over_time_result
        return trajectory, input_trajectory, time_array
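
The initial-state constraint above is written as eight paired >=/<= linear constraints. A compact equivalent, shown here only as a sketch with illustrative values, uses a vectorized equality or a bounding box with identical lower and upper bounds:

import numpy as np
from pydrake.all import MathematicalProgram, eq

mp = MathematicalProgram()
x0 = mp.NewContinuousVariables(4, "x_0")
state_initial = np.array([1.0, 0.0, 0.0, 0.5])  # illustrative values
# One vectorized equality instead of eight paired inequalities.
mp.AddConstraint(eq(x0, state_initial))
# Equivalent alternative: a bounding box with lb == ub.
mp.AddBoundingBoxConstraint(state_initial, state_initial, x0)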
コード例 #18
0
    def solve(self, quad_start_q, quad_final_q, time_used):
        mp = MathematicalProgram()

        # We want to solve this for a certain number of knot points
        N = 40  # num knot points
        time_increment = time_used / (N + 1)
        dt = time_increment
        time_array = np.arange(0.0, time_used, time_increment)

        quad_u = mp.NewContinuousVariables(2, "u_0")
        quad_q = mp.NewContinuousVariables(6, "quad_q_0")

        for i in range(1, N):
            u = mp.NewContinuousVariables(2, "u_%d" % i)
            quad = mp.NewContinuousVariables(6, "quad_q_%d" % i)

            quad_u = np.vstack((quad_u, u))
            quad_q = np.vstack((quad_q, quad))

        for i in range(N):
            mp.AddLinearConstraint(quad_u[i][0] <= 3.0)  # force
            mp.AddLinearConstraint(quad_u[i][0] >= 0.0)  # force
            mp.AddLinearConstraint(quad_u[i][1] <= 10.0)  # torque
            mp.AddLinearConstraint(quad_u[i][1] >= -10.0)  # torque

            mp.AddLinearConstraint(quad_q[i][0] <= 1000.0)  # pos x
            mp.AddLinearConstraint(quad_q[i][0] >= -1000.0)
            mp.AddLinearConstraint(quad_q[i][1] <= 1000.0)  # pos y
            mp.AddLinearConstraint(quad_q[i][1] >= -1000.0)
            mp.AddLinearConstraint(
                quad_q[i][2] <= 60.0 * np.pi / 180.0)  # pos theta
            mp.AddLinearConstraint(quad_q[i][2] >= -60.0 * np.pi / 180.0)
            mp.AddLinearConstraint(quad_q[i][3] <= 1000.0)  # vel x
            mp.AddLinearConstraint(quad_q[i][3] >= -1000.0)
            mp.AddLinearConstraint(quad_q[i][4] <= 1000.0)  # vel y
            mp.AddLinearConstraint(quad_q[i][4] >= -1000.0)
            mp.AddLinearConstraint(quad_q[i][5] <= 10.0)  # vel theta
            mp.AddLinearConstraint(quad_q[i][5] >= -10.0)

        for i in range(1, N):
            quad_q_dyn_feasible = self.dynamics(quad_q[i - 1, :],
                                                quad_u[i - 1, :], dt)

            # Direct transcription constraints on states to dynamics
            for j in range(6):
                quad_state_err = (quad_q[i][j] - quad_q_dyn_feasible[j])
                eps = 0.001
                mp.AddConstraint(quad_state_err <= eps)
                mp.AddConstraint(quad_state_err >= -eps)

        # Initial and final quad and ball states
        for j in range(6):
            mp.AddLinearConstraint(quad_q[0][j] == quad_start_q[j])
            mp.AddLinearConstraint(quad_q[-1][j] == quad_final_q[j])

        # Quadratic cost on the control input
        R_force = 10.0
        R_torque = 100.0
        Q_quad_x = 100.0
        Q_quad_y = 100.0
        mp.AddQuadraticCost(R_force * quad_u[:, 0].dot(quad_u[:, 0]))
        mp.AddQuadraticCost(R_torque * quad_u[:, 1].dot(quad_u[:, 1]))
        mp.AddQuadraticCost(Q_quad_x * quad_q[:, 0].dot(quad_q[:, 0]))
        mp.AddQuadraticCost(Q_quad_y * quad_q[:, 1].dot(quad_q[:, 1]))

        # Solve the optimization
        print "Number of decision vars: ", mp.num_vars()

        # mp.SetSolverOption(SolverType.kSnopt, "Major iterations limit", 100000)

        print "Solve: ", mp.Solve()

        quad_traj = mp.GetSolution(quad_q)
        input_traj = mp.GetSolution(quad_u)

        return (quad_traj, input_traj, time_array)
コード例 #19
0
# d[2] = 1
# d[3] = -1

d = np.array([5.0752e+01, 4.7343e+05, 8.4125e+05, 6.2668e+05])

phi0 = -36332.36234347365

print("Qp : ", Quadratic_Positive_def)
print("Qp det: ", QP_det)

# Quadratic cost : u^TQu + c^Tu
CLF_QP_cost_v_effective = np.dot(u_var, np.dot(Quadratic_Positive_def,
                                               u_var)) + np.dot(c, u_var)

prog.AddQuadraticCost(CLF_QP_cost_v_effective)
prog.AddConstraint(np.dot(d, u_var) + phi0 <= 0)

solver = IpoptSolver()

prog.Solve()
# solver.Solve(prog)
print("Optimal u : ", prog.GetSolution(u_var))
u_CLF_QP = prog.GetSolution(u_var)

# ('A_fl:', )
# ('A_fl_det:', 137180180557.17741)
# ('Qp : ', array([[ 1.0000e+00, -1.5475e-13,  4.0035e-14,  3.7932e-15],
#        [-1.5475e-13,  5.7298e+07,  2.1803e+05, -3.3461e+06],
#        [ 4.0035e-14,  2.1803e+05,  6.2061e+07,  3.5442e+07],
#        [ 3.7932e-15, -3.3461e+06,  3.5442e+07,  2.5742e+07]]))
# ('Qp det: ', 1.8818401937699662e+22)
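
The fragment above relies on prog, u_var, and the cost/constraint matrices being constructed earlier in the script. A self-contained toy QP of the same shape (minimize u'Qu + c'u subject to a single linear inequality d'u + phi0 <= 0, all numbers made up, solved with Drake's default solver rather than Ipopt):

import numpy as np
from pydrake.all import MathematicalProgram, Solve

prog = MathematicalProgram()
u = prog.NewContinuousVariables(4, "u")
Q = np.eye(4)
c = np.array([1.0, -2.0, 0.0, 0.5])
d = np.array([1.0, 1.0, 1.0, 1.0])
phi0 = -1.0
prog.AddQuadraticCost(u.dot(Q.dot(u)) + c.dot(u))   # u'Qu + c'u
prog.AddLinearConstraint(d.dot(u) + phi0 <= 0)
result = Solve(prog)
print(result.is_success(), result.GetSolution(u))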
コード例 #20
0
    q, qprev, w_axis, w_mag, dt = np.split(
        q_qprev_v_dt, [4, 4 + 4, 4 + 4 + 3, 4 + 4 + 3 + 1])
    return q - apply_angular_velocity_to_quaternion(qprev, w_axis, w_mag, dt)


N = 4
prog = MathematicalProgram()
q = prog.NewContinuousVariables(rows=N, cols=4, name='q')
w_axis = prog.NewContinuousVariables(rows=N, cols=3, name="w_axis")
w_mag = prog.NewContinuousVariables(rows=N, cols=1, name="w_mag")
# dt = [0.0] + [1.0/(N-1)] * (N-1)
dt = prog.NewContinuousVariables(rows=N, cols=1, name="dt")

for k in range(N):
    (prog.AddConstraint(lambda x: [x @ x], [1], [
        1
    ], q[k]).evaluator().set_description(f"q[{k}] unit quaternion constraint"))
    # Impose unit length constraint on the rotation axis.
    (prog.AddConstraint(lambda x: [x @ x], [1], [1],
                        w_axis[k]).evaluator().set_description(
                            f"w_axis[{k}] unit axis constraint"))
for k in range(1, N):
    (prog.AddConstraint(
        lambda q_qprev_v_dt: backward_euler(q_qprev_v_dt),
        lb=[0.0] * 4,
        ub=[0.0] * 4,
        vars=np.concatenate([
            q[k], q[k - 1], w_axis[k], w_mag[k], dt[k]
        ])).evaluator().set_description(f"q[{k}] backward euler constraint"))

(prog.AddLinearConstraint(eq(q[0], np.array(
コード例 #21
0
x = np.empty((2, N), dtype=Variable)
for n in range(0, N - 1):
    u[:, n] = prog.NewContinuousVariables(1, 'u' + str(n))
    x[:, n] = prog.NewContinuousVariables(2, 'x' + str(n))
x[:, N - 1] = prog.NewContinuousVariables(2, 'x' + str(N))

# Add constraints at every knot point
x0 = [-2, 0]
prog.AddBoundingBoxConstraint(x0, x0, x[:, 0])
for n in range(0, N - 1):
    # Add the dynamics as an equality constraint
    # prog.AddConstraint(eq(x[:,n+1], A.dot(x[:,n]) + B.dot(u[:,n])))
    # Add the dynamics as a function handle constraint
    prog.AddConstraint(dynamicsCstr,
                       lb=np.array([0., 0.]),
                       ub=np.array([0., 0.]),
                       vars=np.concatenate((x[:, n], u[:, n], x[:, n + 1]),
                                           axis=0),
                       description="dynamics")
    prog.AddBoundingBoxConstraint(-1, 1, u[:, n])
xf = [0, 0]
prog.AddBoundingBoxConstraint(xf, xf, x[:, N - 1])

# Solve the problem
result = Solve(prog)

x_sol = result.GetSolution(x)
print(f"Optimization successful? {result.is_success()}")

# Display the optimized trajectories
plt.figure()
plt.plot(x_sol[0, :], x_sol[1, :])
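
The dynamics above are imposed through a function-handle constraint (dynamicsCstr) rather than a symbolic expression. A minimal self-contained version of that pattern, using simple double-integrator dynamics purely for illustration:

import numpy as np
from pydrake.all import MathematicalProgram, Solve

dt = 0.1

def dynamics_cstr(vars):
    # vars stacks (x_n, u_n, x_{n+1}); return the forward-Euler residual.
    x_n, u_n, x_np1 = vars[0:2], vars[2:3], vars[3:5]
    xdot = np.array([x_n[1], u_n[0]])  # double integrator
    return x_np1 - (x_n + dt * xdot)

prog = MathematicalProgram()
x0 = prog.NewContinuousVariables(2, "x0")
u0 = prog.NewContinuousVariables(1, "u0")
x1 = prog.NewContinuousVariables(2, "x1")
prog.AddConstraint(dynamics_cstr, lb=np.zeros(2), ub=np.zeros(2),
                   vars=np.concatenate((x0, u0, x1)))
prog.AddBoundingBoxConstraint([-2.0, 0.0], [-2.0, 0.0], x0)  # pin the initial state
result = Solve(prog)
print(result.is_success(), result.GetSolution(x1))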
コード例 #22
0
def test_Feedback_Linearization_controller_BS(x, t):
    # Output feedback linearization 2
    #
    # y1= ||r-rf||^2/2
    # y2= wx
    # y3= wy
    # y4= wz
    #
    # Analysis: The zero dynamics is unstable.
    global g, xf, Aout, Bout, Mout, T_period, w_freq, radius

    print("%%%%%%%%%%%%%%%%%%%%%")
    print("%%CLF-QP  %%%%%%%%%%%")
    print("%%%%%%%%%%%%%%%%%%%%%")

    print("t:", t)
    prog = MathematicalProgram()
    u_var = prog.NewContinuousVariables(4, "u_var")
    c_var = prog.NewContinuousVariables(1, "c_var")

    # # # Example 1 : Circle

    x_f = radius * math.cos(w_freq * t)
    y_f = radius * math.sin(w_freq * t)
    # print("x_f:",x_f)
    # print("y_f:",y_f)
    dx_f = -radius * math.pow(w_freq, 1) * math.sin(w_freq * t)
    dy_f = radius * math.pow(w_freq, 1) * math.cos(w_freq * t)
    ddx_f = -radius * math.pow(w_freq, 2) * math.cos(w_freq * t)
    ddy_f = -radius * math.pow(w_freq, 2) * math.sin(w_freq * t)
    dddx_f = radius * math.pow(w_freq, 3) * math.sin(w_freq * t)
    dddy_f = -radius * math.pow(w_freq, 3) * math.cos(w_freq * t)
    ddddx_f = radius * math.pow(w_freq, 4) * math.cos(w_freq * t)
    ddddy_f = radius * math.pow(w_freq, 4) * math.sin(w_freq * t)

    # # Example 2 : Lissajous curve a=1 b=2
    # ratio_ab=2;
    # a=1;
    # b=ratio_ab*a;
    # delta_lissajous = 0 #math.pi/2;

    # x_f = radius*math.sin(a*w_freq*t+delta_lissajous)
    # y_f = radius*math.sin(b*w_freq*t)
    # # print("x_f:",x_f)
    # # print("y_f:",y_f)
    # dx_f = radius*math.pow(a*w_freq,1)*math.cos(a*w_freq*t+delta_lissajous)
    # dy_f = radius*math.pow(b*w_freq,1)*math.cos(b*w_freq*t)
    # ddx_f = -radius*math.pow(a*w_freq,2)*math.sin(a*w_freq*t+delta_lissajous)
    # ddy_f = -radius*math.pow(b*w_freq,2)*math.sin(b*w_freq*t)
    # dddx_f = -radius*math.pow(a*w_freq,3)*math.cos(a*w_freq*t+delta_lissajous)
    # dddy_f = -radius*math.pow(b*w_freq,3)*math.cos(b*w_freq*t)
    # ddddx_f = radius*math.pow(a*w_freq,4)*math.sin(a*w_freq*t+delta_lissajous)
    # ddddy_f = radius*math.pow(b*w_freq,4)*math.sin(b*w_freq*t)

    e1 = np.array([1, 0, 0])  # e3 elementary vector
    e2 = np.array([0, 1, 0])  # e3 elementary vector
    e3 = np.array([0, 0, 1])  # e3 elementary vector

    # epsilonn= 1e-0
    # alpha = 100;
    # kp1234=alpha*1/math.pow(epsilonn,4) # gain for y
    # kd1=4/math.pow(epsilonn,3) # gain for y^(1)
    # kd2=12/math.pow(epsilonn,2) # gain for y^(2)
    # kd3=4/math.pow(epsilonn,1)  # gain for y^(3)

    # kp5= 10;                    # gain for y5

    q = np.zeros(7)
    qd = np.zeros(6)
    q = x[0:8]
    qd = x[8:15]

    print("qnorm:", np.dot(q[3:7], q[3:7]))

    q0 = q[3]
    q1 = q[4]
    q2 = q[5]
    q3 = q[6]
    xi1 = q[7]

    v = qd[0:3]
    w = qd[3:6]
    xi2 = qd[6]

    xd = xf[0]
    yd = xf[1]
    zd = xf[2]
    wd = xf[11:14]

    # Useful vectors and matrices

    (Rq, Eq, wIw, I_inv) = robobee_plantBS.GetManipulatorDynamics(q, qd)

    F1q = np.zeros((3, 4))
    F1q[0, :] = np.array([q2, q3, q0, q1])
    F1q[1, :] = np.array([-1 * q1, -1 * q0, q3, q2])
    F1q[2, :] = np.array([q0, -1 * q1, -1 * q2, q3])

    Rqe3 = np.dot(Rq, e3)
    Rqe3_hat = np.zeros((3, 3))
    Rqe3_hat[0, :] = np.array([0, -Rqe3[2], Rqe3[1]])
    Rqe3_hat[1, :] = np.array([Rqe3[2], 0, -Rqe3[0]])
    Rqe3_hat[2, :] = np.array([-Rqe3[1], Rqe3[0], 0])

    Rqe1 = np.dot(Rq, e1)
    Rqe1_hat = np.zeros((3, 3))
    Rqe1_hat[0, :] = np.array([0, -Rqe1[2], Rqe1[1]])
    Rqe1_hat[1, :] = np.array([Rqe1[2], 0, -Rqe1[0]])
    Rqe1_hat[2, :] = np.array([-Rqe1[1], Rqe1[0], 0])

    Rqe1_x = np.dot(e2.T, Rqe1)
    Rqe1_y = np.dot(e1.T, Rqe1)

    w_hat = np.zeros((3, 3))
    w_hat[0, :] = np.array([0, -w[2], w[1]])
    w_hat[1, :] = np.array([w[2], 0, -w[0]])
    w_hat[2, :] = np.array([-w[1], w[0], 0])

    Rw = np.dot(Rq, w)
    Rw_hat = np.zeros((3, 3))
    Rw_hat[0, :] = np.array([0, -Rw[2], Rw[1]])
    Rw_hat[1, :] = np.array([Rw[2], 0, -Rw[0]])
    Rw_hat[2, :] = np.array([-Rw[1], Rw[0], 0])
    #- Checking the derivation

    # print("F1qEqT-(-Rqe3_hat)",np.dot(F1q,Eq.T)-(-Rqe3_hat))
    # Rqe3_cal = np.zeros(3)
    # Rqe3_cal[0] = 2*(q3*q1+q0*q2)
    # Rqe3_cal[1] = 2*(q3*q2-q0*q1)
    # Rqe3_cal[2] = (q0*q0+q3*q3-q1*q1-q2*q2)

    # print("Rqe3 - Rqe3_cal", Rqe3-Rqe3_cal)

    # Four output
    y1 = x[0] - x_f
    y2 = x[1] - y_f
    y3 = x[2] - zd - 0
    y4 = math.atan2(Rqe1_x, Rqe1_y) - math.pi / 8
    # print("Rqe1_x:", Rqe1_x)

    eta1 = np.zeros(3)
    eta1 = np.array([y1, y2, y3])
    eta5 = y4

    # print("y4", y4)

    # First derivative of first three output and yaw output
    eta2 = np.zeros(3)
    eta2 = v - np.array([dx_f, dy_f, 0])
    dy1 = eta2[0]
    dy2 = eta2[1]
    dy3 = eta2[2]

    x2y2 = (math.pow(Rqe1_x, 2) + math.pow(Rqe1_y, 2))  # x^2+y^2

    eta6_temp = np.zeros(3)  #eta6_temp = (ye2T-xe1T)/(x^2+y^2)
    eta6_temp = (Rqe1_y * e2.T - Rqe1_x * e1.T) / x2y2
    # print("eta6_temp:", eta6_temp)
    # Body frame w  ( multiply R)
    eta6 = np.dot(eta6_temp, np.dot(-Rqe1_hat, np.dot(Rq, w)))

    # World frame w
    # eta6 = np.dot(eta6_temp,np.dot(-Rqe1_hat,w))
    # print("Rqe1_hat:", Rqe1_hat)

    # Second derivative of first three output
    eta3 = np.zeros(3)
    eta3 = -g * e3 + Rqe3 * xi1 - np.array([ddx_f, ddy_f, 0])
    ddy1 = eta3[0]
    ddy2 = eta3[1]
    ddy3 = eta3[2]

    # Third derivative of first three output
    eta4 = np.zeros(3)
    # Body frame w ( multiply R)
    eta4 = Rqe3 * xi2 + np.dot(-Rqe3_hat, np.dot(Rq, w)) * xi1 - np.array(
        [dddx_f, dddy_f, 0])

    # World frame w
    # eta4 = Rqe3*xi2+np.dot(np.dot(F1q,Eq.T),w)*xi1 - np.array([dddx_f,dddy_f,0])
    dddy1 = eta4[0]
    dddy2 = eta4[1]
    dddy3 = eta4[2]

    # Fourth derivative of first three output
    B_qw_temp = np.zeros(3)
    # Body frame w
    B_qw_temp = xi1 * (-np.dot(Rw_hat, np.dot(Rqe3_hat, Rw)) +
                       np.dot(Rqe3_hat, np.dot(Rq, np.dot(I_inv, wIw)))
                       )  # np.dot(I_inv,wIw)*xi1-2*w*xi2
    B_qw = B_qw_temp + xi2 * (-2 * np.dot(Rqe3_hat, Rw)) - np.array(
        [ddddx_f, ddddy_f, 0])  #np.dot(Rqe3_hat,B_qw_temp)

    # World frame w
    # B_qw_temp = xi1*(-np.dot(w_hat,np.dot(Rqe3_hat,w))+np.dot(Rqe3_hat,np.dot(I_inv,wIw))) # np.dot(I_inv,wIw)*xi1-2*w*xi2
    # B_qw      = B_qw_temp+xi2*(-2*np.dot(Rqe3_hat,w)) - np.array([ddddx_f,ddddy_f,0])   #np.dot(Rqe3_hat,B_qw_temp)

    # B_qw = B_qw_temp - np.dot(w_hat,np.dot(Rqe3_hat,w))*xi1

    # Second derivative of yaw output

    # Body frame w
    dRqe1_x = np.dot(e2, np.dot(-Rqe1_hat, Rw))  # \dot{x}
    dRqe1_y = np.dot(e1, np.dot(-Rqe1_hat, Rw))  # \dot{y}
    alpha1 = 2 * (Rqe1_x * dRqe1_x +
                  Rqe1_y * dRqe1_y) / x2y2  # (2xdx +2ydy)/(x^2+y^2)

    # World frame w
    # dRqe1_x = np.dot(e2,np.dot(-Rqe1_hat,w)) # \dot{x}
    # dRqe1_y = np.dot(e1,np.dot(-Rqe1_hat,w)) # \dot{y}

    # alpha1 = 2*(Rqe1_x*dRqe1_x+Rqe1_y*dRqe1_y)/x2y2 # (2xdx +2ydy)/(x^2+y^2)
    # # alpha2 = math.pow(dRqe1_y,2)-math.pow(dRqe1_x,2)

    # Body frame w

    B_yaw_temp3 = np.zeros(3)
    B_yaw_temp3 = alpha1 * np.dot(Rqe1_hat, Rw) + np.dot(
        Rqe1_hat, np.dot(Rq, np.dot(I_inv, wIw))) - np.dot(
            Rw_hat, np.dot(Rqe1_hat, Rw))

    B_yaw = np.dot(eta6_temp,
                   B_yaw_temp3)  # +alpha2 :Could be an error in math.
    g_yaw = np.zeros(3)
    g_yaw = -np.dot(eta6_temp, np.dot(Rqe1_hat, np.dot(Rq, I_inv)))

    # World frame w
    # B_yaw_temp3 =np.zeros(3)
    # B_yaw_temp3 = alpha1*np.dot(Rqe1_hat,w)+np.dot(Rqe1_hat,np.dot(I_inv,wIw))-np.dot(w_hat,np.dot(Rqe1_hat,w))

    # B_yaw = np.dot(eta6_temp,B_yaw_temp3) # +alpha2 :Could be an error in math.
    # g_yaw = np.zeros(3)
    # g_yaw = -np.dot(eta6_temp,np.dot(Rqe1_hat,I_inv))

    # print("g_yaw:", g_yaw)
    # Decoupling matrix A(x)\in\mathbb{R}^4

    A_fl = np.zeros((4, 4))
    A_fl[0:3, 0] = Rqe3
    # Body frame w
    A_fl[0:3, 1:4] = -np.dot(Rqe3_hat, np.dot(Rq, I_inv)) * xi1
    # World frame w
    # A_fl[0:3,1:4] = -np.dot(Rqe3_hat,I_inv)*xi1
    A_fl[3, 1:4] = g_yaw

    A_fl_inv = np.linalg.inv(A_fl)
    A_fl_det = np.linalg.det(A_fl)
    # print("I_inv:", I_inv)
    print("A_fl:", A_fl)
    print("A_fl_det:", A_fl_det)

    # Drift in the output dynamics

    U_temp = np.zeros(4)
    U_temp[0:3] = B_qw
    U_temp[3] = B_yaw

    # Output dyamics

    eta = np.hstack([eta1, eta2, eta3, eta5, eta4, eta6])
    eta_norm = np.dot(eta, eta)

    # v-CLF QP controller

    FP_PF = np.dot(Aout.T, Mout) + np.dot(Mout, Aout)
    PG = np.dot(Mout, Bout)

    L_FVx = np.dot(eta, np.dot(FP_PF, eta))
    L_GVx = 2 * np.dot(eta.T, PG)  # row vector
    L_fhx_star = U_temp

    Vx = np.dot(eta, np.dot(Mout, eta))
    # phi0_exp = L_FVx+np.dot(L_GVx,L_fhx_star)+(min_e_Q/max_e_P)*Vx*1    # exponentially stabilizing
    # phi0_exp = L_FVx+np.dot(L_GVx,L_fhx_star)+min_e_Q*eta_norm      # more exact bound - exponentially stabilizing
    phi0_exp = L_FVx + np.dot(
        L_GVx, L_fhx_star)  # more exact bound - exponentially stabilizing

    phi1_decouple = np.dot(L_GVx, A_fl)

    # # Solve QP
    v_var = np.dot(A_fl, u_var) + L_fhx_star
    Quadratic_Positive_def = np.matmul(A_fl.T, A_fl)
    QP_det = np.linalg.det(Quadratic_Positive_def)
    c_QP = 2 * np.dot(L_fhx_star.T, A_fl)

    c_QP_extned = np.hstack([c_QP, -1])
    u_var_extended = np.hstack([u_var, c_var])

    # CLF_QP_cost_v = np.dot(v_var,v_var) // Exact quadratic cost
    CLF_QP_cost_v_effective = np.dot(
        u_var, np.dot(Quadratic_Positive_def, u_var)) + np.dot(
            c_QP, u_var) - c_var[0]  # Quadratic cost without constant term

    # CLF_QP_cost_u = np.dot(u_var,u_var)

    phi1 = np.dot(phi1_decouple, u_var) + c_var[0] * eta_norm

    #----Printing intermediate states

    # print("L_fhx_star: ",L_fhx_star)
    # print("c_QP:", c_QP)
    # print("Qp : ",Quadratic_Positive_def)
    # print("Qp det: ", QP_det)
    # print("c_QP", c_QP)

    # print("phi0_exp: ", phi0_exp)
    # print("PG:", PG)
    # print("L_GVx:", L_GVx)
    # print("eta6", eta6)
    # print("d : ", phi1_decouple)
    # print("Cost expression:", CLF_QP_cost_v)
    #print("Const expression:", phi0_exp+phi1)

    #----Different solver option // Gurobi did not work with python at this point (some binding issue 8/8/2018)
    solver = IpoptSolver()
    # solver = GurobiSolver()
    # print solver.available()
    # assert(solver.available()==True)
    # assertEqual(solver.solver_type(), mp.SolverType.kGurobi)
    # solver.Solve(prog)
    # assertEqual(result, mp.SolutionResult.kSolutionFound)

    # mp.AddLinearConstraint()
    # print("x:", x)
    # print("phi_0_exp:", phi0_exp)
    # print("phi1_decouple:", phi1_decouple)

    # print("eta1:", eta1)
    # print("eta2:", eta2)
    # print("eta3:", eta3)
    # print("eta4:", eta4)
    # print("eta5:", eta5)
    # print("eta6:", eta6)

    # Output Feedback Linearization controller
    mu = np.zeros(4)
    k = np.zeros((4, 14))
    k = np.matmul(Bout.T, Mout)
    k = np.matmul(np.linalg.inv(R), k)

    mu = -1 / 1 * np.matmul(k, eta)

    v = -U_temp + mu

    U_fl = np.matmul(
        A_fl_inv, v
    )  # Output Feedback controller to compare the result with the CLF-QP solver

    # Set up the QP problem
    prog.AddQuadraticCost(CLF_QP_cost_v_effective)
    prog.AddConstraint(phi0_exp + phi1 <= 0)
    prog.AddConstraint(c_var[0] >= 0)
    prog.AddConstraint(c_var[0] <= 100)
    prog.AddConstraint(u_var[0] <= 30)
    prog.SetSolverOption(mp.SolverType.kIpopt, "print_level",
                         5)  # CAUTION: Assuming that solver used Ipopt

    print("CLF value:", Vx)  # Current CLF value

    prog.SetInitialGuess(u_var, U_fl)
    prog.Solve()  # Solve with default osqp

    # solver.Solve(prog)
    print("Optimal u : ", prog.GetSolution(u_var))
    U_CLF_QP = prog.GetSolution(u_var)
    C_CLF_QP = prog.GetSolution(c_var)

    #---- Printing for debugging
    # dx = robobee_plantBS.evaluate_f(U_fl,x)
    # print("dx", dx)
    # print("\n######################")
    # # print("qe3:", A_fl[0,0])
    # print("u:", u)
    # print("\n####################33")

    # deta4 = B_qw+Rqe3*U_fl_zero[0]+np.matmul(-np.matmul(Rqe3_hat,I_inv),U_fl_zero[1:4])*xi1
    # deta6 = B_yaw+np.dot(g_yaw,U_fl_zero[1:4])
    # print("deta4:",deta4)
    # print("deta6:",deta6)
    print(C_CLF_QP)

    phi1_opt = np.dot(phi1_decouple, U_CLF_QP) + C_CLF_QP * eta_norm
    phi1_opt_FL = np.dot(phi1_decouple, U_fl)

    print("FL u: ", U_fl)
    print("CLF u:", U_CLF_QP)
    print("Cost FL: ", np.dot(mu, mu))

    v_CLF = np.dot(A_fl, U_CLF_QP) + L_fhx_star
    print("Cost CLF: ", np.dot(v_CLF, v_CLF))
    print("Constraint FL : ", phi0_exp + phi1_opt_FL)
    print("Constraint CLF : ", phi0_exp + phi1_opt)
    u = U_CLF_QP

    print("eigenvalues minQ maxP:", [min_e_Q, max_e_P])

    return u
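
The controller above hand-assembles several skew-symmetric "hat" matrices with identical structure (Rqe3_hat, Rqe1_hat, w_hat, Rw_hat). A small standalone helper capturing that pattern (a sketch, not part of the original controller):

import numpy as np

def hat(v):
    """Skew-symmetric matrix such that hat(v) @ w == np.cross(v, w)."""
    return np.array([[0.0, -v[2], v[1]],
                     [v[2], 0.0, -v[0]],
                     [-v[1], v[0], 0.0]])

# e.g. Rqe3_hat in the controller above is hat(Rqe3), and w_hat is hat(w).
v = np.array([0.1, -0.2, 0.3])
w = np.array([1.0, 0.5, -0.4])
assert np.allclose(hat(v) @ w, np.cross(v, w))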
コード例 #23
0
    def ComputeControlInput(self, x, t):
        # Set up things you might want...
        q = x[0:self.nq]
        v = x[self.nq:]

        kinsol = self.hand.doKinematics(x[0:self.hand.get_num_positions()])
        M = self.hand.massMatrix(kinsol)
        C = self.hand.dynamicsBiasTerm(kinsol, {}, None)
        B = self.hand.B

        # Assume grasp points are achieved, and calculate
        # contact jacobian at those points, as well as the current
        # grasp points and normals (in WORLD FRAME).
        grasp_points_world_now = self.transform_grasp_points_manipuland(q)
        grasp_normals_world_now = self.transform_grasp_normals_manipuland(q)
        J_manipuland = jacobian(
            self.transform_grasp_points_manipuland, q)
        ee_points_now = self.transform_grasp_points_fingers(q)
        J_manipulator = jacobian(
            self.transform_grasp_points_fingers, q)

        # The contact jacobian (Phi), for contact forces coming from
        # point contacts, describes how the movement of the contact point
        # maps into the joint coordinates of the robot. We can get this
        # by combining the jacobians derived from forward kinematics to each
        # of the two bodies that are in contact, evaluated at the contact point
        # in each body's frame.
        J_contact = J_manipuland - J_manipulator
        # Cut off the 3rd dimension, which should always be 0
        J_contact = J_contact[0:2, :, :]
        # Given these, the manipulator equations enter as a linear constraint
        # M qdd + C = Bu + J_contact lambda
        # (the combined effects of lambda on the robot).
        # The list of grasp points, in manipuland frame, that we'll use.
        n_cf = len(self.grasp_points)
        number_of_grasp_points = len(self.grasp_points)
        # The evaluated desired manipuland posture.
        manipuland_qdes = self.GetDesiredObjectPosition(t)

        # The desired robot (arm) posture, calculated via inverse kinematics
        # to achieve the desired grasp points on the object in its current
        # target posture.
        qdes, info = self.ComputeTargetPosture(x, manipuland_qdes)
        # print('shape' + str(np.shape(qdes)))
        # print('self.nq' + str(self.nq))
        # print('self.nu' + str(self.nu))
        if info != 1:
            if not self.shut_up:
                print "Warning: target posture IK solve got info %d " \
                      "when computing goal posture at least once during " \
                      "simulation. This means the grasp points was hard to " \
                      "achieve given the current object posture. This is " \
                      "occasionally OK, but indicates that your controller " \
                      "is probably struggling a little." % info
                self.shut_up = True

        # From here, it's up to you. Following the guidelines in the problem
        # set, implement a controller to achieve the specified goals.

        kd = 1
        kp = 25

        from pydrake.all import MathematicalProgram, Solve
        mp = MathematicalProgram()

        u = mp.NewContinuousVariables(self.nu, "u")
        qdd = mp.NewContinuousVariables(self.nq, "qdd")
        x_y_dimensions = 2

        lambda_variable = mp.NewContinuousVariables(x_y_dimensions, number_of_grasp_points, "lambda_variable")

        forces = np.zeros((self.nq))
        for i in range(number_of_grasp_points):
            current_forces = np.transpose(J_contact)[:,i,:].dot(lambda_variable[:,i])
            forces = forces + current_forces

        leftHandSide = M.dot(qdd) + C
        rightHandSide = B.dot(u) + forces

        for i in range(len(leftHandSide)):
            mp.AddConstraint(leftHandSide[i] == rightHandSide[i])

        # Calculate Normals
        normals = np.array([])
        for i in range(number_of_grasp_points):
            current_grasp_normal = grasp_normals_world_now[0:2, i]
            normals = np.vstack((normals, current_grasp_normal)) if normals.size else current_grasp_normal

        # Calculate tangents
        tangents = np.array([])
        for i in range(number_of_grasp_points):
            current_grasp_tangent = np.array([normals[i, 1], -normals[i, 0]])
            tangents = np.vstack((tangents, current_grasp_tangent)) if tangents.size else current_grasp_tangent

        # Create beta variables
        beta = mp.NewContinuousVariables(2, number_of_grasp_points, "b0")

        # Create Grasps
        for i in range(number_of_grasp_points):
            c0 = normals[i] - self.mu * tangents[i]
            c1 = normals[i] + self.mu * tangents[i]
            right_hand_lambda1 = c0 * beta[0, i] + c1 * beta[1, i]
            mp.AddConstraint(lambda_variable[0, i] == right_hand_lambda1[0])
            mp.AddConstraint(lambda_variable[1, i] == right_hand_lambda1[1])
            mp.AddConstraint(beta[0, i] >= 0)
            mp.AddConstraint(beta[1, i] >= 0)
            mp.AddConstraint(normals[i].dot(lambda_variable[:, i]) >= 0.25)

        # Copying the control period of the constructor. Probably not supposed to do this...
        next_tick_qd = v + qdd * self.control_period
        next_tick_q = q + next_tick_qd * self.control_period

        q_error = qdes - next_tick_q
        proportionalCost = q_error.dot(np.transpose(q_error))
        qd_error = 0 - next_tick_qd
        diffCost = qd_error.dot(np.transpose(qd_error))
        mp.AddQuadraticCost(kp * proportionalCost + kd * diffCost)
        result = Solve(mp)
        u_solution = result.GetSolution(u)

        u = np.zeros(self.nu)
        return u_solution
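
The beta variables above parameterize each planar contact force as a nonnegative combination of the two friction-cone edges c0 = n - mu*t and c1 = n + mu*t. A plain-NumPy illustration of why any such combination respects the friction cone (all numbers are arbitrary):

import numpy as np

mu = 0.5
n = np.array([0.0, 1.0])      # contact normal
t = np.array([1.0, 0.0])      # contact tangent
c0, c1 = n - mu * t, n + mu * t
beta = np.array([0.3, 0.1])   # beta >= 0, as constrained in the program above
lam = beta[0] * c0 + beta[1] * c1
# Any such lam stays inside the friction cone |lam . t| <= mu * (lam . n).
assert abs(lam.dot(t)) <= mu * lam.dot(n) + 1e-12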
コード例 #24
0
    def compute_opt_trajectory(self, state_initial, goal_func, verbose=True):
        '''
        nonlinear trajectory optimization to land drone starting at state_initial, on a vehicle target trajectory given by the goal_func

        :return: four return args separated by commas:

            trajectory, input_trajectory, time_array, fuel_consumption

            trajectory: a 2d array with N rows, and 6 columns. See simulate_states_over_time for more documentation.
            input_trajectory: a 2d array with N-1 rows, and 3 columns. See simulate_states_over_time for more documentation.
            time_array: an array with N rows.
            fuel_consumption: float, expected fuel consumption over the returned trajectory.
        '''
        # initialize math program
        import time
        start_time = time.time()
        mp = MathematicalProgram()
        num_time_steps = 40

        booster = mp.NewContinuousVariables(3, "booster_0")
        booster_over_time = booster[np.newaxis, :]

        state = mp.NewContinuousVariables(6, "state_0")
        state_over_time = state[np.newaxis, :]

        dt = mp.NewContinuousVariables(1, "dt")

        for k in range(1, num_time_steps - 1):
            booster = mp.NewContinuousVariables(3, "booster_%d" % k)
            booster_over_time = np.vstack((booster_over_time, booster))
        for k in range(1, num_time_steps):
            state = mp.NewContinuousVariables(6, "state_%d" % k)
            state_over_time = np.vstack((state_over_time, state))

        goal_state = goal_func(dt[0] * 39)

        # calculate states over time
        for i in range(1, num_time_steps):
            sim_next_state = state_over_time[
                i - 1, :] + dt[0] * self.drone_dynamics(
                    state_over_time[i - 1, :], booster_over_time[i - 1, :])

            # add constraints to restrict the next state to the decision variables
            for j in range(6):
                mp.AddConstraint(sim_next_state[j] == state_over_time[i, j])

            # don't hit ground
            mp.AddLinearConstraint(state_over_time[i, 2] >= 0.05)

            # enforce that we must be thrusting within some constraints
            mp.AddLinearConstraint(booster_over_time[i - 1, 0] <= 5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 0] >= -5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 1] <= 5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 1] >= -5.0)

            # keep forces in a reasonable position
            mp.AddLinearConstraint(booster_over_time[i - 1, 2] >= 1.0)
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 0] <= booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 0] >= -booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 1] <= booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 1] >= -booster_over_time[i - 1, 2])

        # add constraints on initial state
        for i in range(6):
            mp.AddLinearConstraint(state_over_time[0, i] == state_initial[i])

        # add constraints on dt
        mp.AddLinearConstraint(dt[0] >= 0.001)

        # add constraints on end state

        # end goal velocity
        mp.AddConstraint(state_over_time[-1, 0] <= goal_state[0] + 0.01)
        mp.AddConstraint(state_over_time[-1, 0] >= goal_state[0] - 0.01)
        mp.AddConstraint(state_over_time[-1, 1] <= goal_state[1] + 0.01)
        mp.AddConstraint(state_over_time[-1, 1] >= goal_state[1] - 0.01)
        mp.AddConstraint(state_over_time[-1, 2] <= goal_state[2] + 0.01)
        mp.AddConstraint(state_over_time[-1, 2] >= goal_state[2] - 0.01)
        mp.AddConstraint(state_over_time[-1, 3] <= goal_state[3] + 0.01)
        mp.AddConstraint(state_over_time[-1, 3] >= goal_state[3] - 0.01)
        mp.AddConstraint(state_over_time[-1, 4] <= goal_state[4] + 0.01)
        mp.AddConstraint(state_over_time[-1, 4] >= goal_state[4] - 0.01)
        mp.AddConstraint(state_over_time[-1, 5] <= goal_state[5] + 0.01)
        mp.AddConstraint(state_over_time[-1, 5] >= goal_state[5] - 0.01)

        # add the cost function
        mp.AddQuadraticCost(
            0.01 * booster_over_time[:, 0].dot(booster_over_time[:, 0]))
        mp.AddQuadraticCost(
            0.01 * booster_over_time[:, 1].dot(booster_over_time[:, 1]))
        mp.AddQuadraticCost(
            0.01 * booster_over_time[:, 2].dot(booster_over_time[:, 2]))

        # add more penalty on dt because minimizing time turns out to be more important
        mp.AddQuadraticCost(10000 * dt[0] * dt[0])

        solved = mp.Solve()
        if verbose:
            print solved

        # extract
        booster_over_time = mp.GetSolution(booster_over_time)
        output_states = mp.GetSolution(state_over_time)
        dt = mp.GetSolution(dt)

        time_array = np.zeros(40)
        for i in range(40):
            time_array[i] = i * dt
        trajectory = self.simulate_states_over_time(state_initial, time_array,
                                                    booster_over_time)

        durations = time_array[1:len(time_array
                                     )] - time_array[0:len(time_array) - 1]
        fuel_consumption = (
            np.sum(booster_over_time[:len(time_array)]**2, axis=1) *
            durations).sum()

        if verbose:
            print 'expected remaining fuel consumption', fuel_consumption
            print("took %s seconds" % (time.time() - start_time))
            print ''

        return trajectory, booster_over_time, time_array, fuel_consumption
コード例 #25
0
    def ComputeControlInput(self, x, t):
        # Set up things you might want...
        q = x[0:self.nq]
        v = x[self.nq:]

        kinsol = self.hand.doKinematics(x[0:self.hand.get_num_positions()])
        M = self.hand.massMatrix(kinsol)
        C = self.hand.dynamicsBiasTerm(kinsol, {}, None)
        B = self.hand.B

        # Assume grasp points are achieved, and calculate
        # contact jacobian at those points, as well as the current
        # grasp points and normals (in WORLD FRAME).
        grasp_points_world_now = self.transform_grasp_points_manipuland(q)
        grasp_normals_world_now = self.transform_grasp_normals_manipuland(q)
        J_manipuland = jacobian(self.transform_grasp_points_manipuland, q)
        ee_points_now = self.transform_grasp_points_fingers(q)
        J_manipulator = jacobian(self.transform_grasp_points_fingers, q)

        # The contact jacobian (Phi), for contact forces coming from
        # point contacts, describes how the movement of the contact point
        # maps into the joint coordinates of the robot. We can get this
        # by combining the jacobians derived from forward kinematics to each
        # of the two bodies that are in contact, evaluated at the contact point
        # in each body's frame.
        J_contact = J_manipuland - J_manipulator
        # Cut off the 3rd dimension, which should always be 0
        J_contact = J_contact[0:2, :, :]
        # Given these, the manipulator equations enter as a linear constraint
        # M qdd + C = Bu + J_contact lambda
        # (the combined effects of lambda on the robot).
        # The list of grasp points, in manipuland frame, that we'll use.
        n_cf = len(self.grasp_points)
        # The evaluated desired manipuland posture.
        manipuland_qdes = self.GetDesiredObjectPosition(t)

        # The desired robot (arm) posture, calculated via inverse kinematics
        # to achieve the desired grasp points on the object in its current
        # target posture.
        qdes, info = self.ComputeTargetPosture(x, manipuland_qdes)
        if info != 1:
            if not self.shut_up:
                print "Warning: target posture IK solve got info %d " \
                      "when computing goal posture at least once during " \
                      "simulation. This means the grasp points was hard to " \
                      "achieve given the current object posture. This is " \
                      "occasionally OK, but indicates that your controller " \
                      "is probably struggling a little." % info
                self.shut_up = True

        # From here, it's up to you. Following the guidelines in the problem
        # set, implement a controller to achieve the specified goals.

        mp = MathematicalProgram()

        #control variables = u = self.nu x 1
        #ddot{q} as variable w/ q's shape
        controls = mp.NewContinuousVariables(self.nu, "controls")
        qdd = mp.NewContinuousVariables(q.shape[0], "qdd")

        #make variables for lambda's and beta's
        k = 0
        b = mp.NewContinuousVariables(2, "betas_%d" % k)
        betas = b
        for i in range(len(self.grasp_points)):
            b = mp.NewContinuousVariables(
                2, "betas_%d" % k)  #pair of betas for each contact point
            betas = np.vstack((betas, b))
            mp.AddLinearConstraint(
                betas[i, 0] >= 0).evaluator().set_description(
                    "beta_0 greater than 0 for %d" % i)
            mp.AddLinearConstraint(
                betas[i, 1] >= 0).evaluator().set_description(
                    "beta_1 greater than 0 for %d" % i)
        #c_0=normals+mu*tangent
        #c1 = normals-mu*tangent
        n = grasp_normals_world_now.T[:, 0:2]  #row=contact point?
        t = get_perpendicular2d(n[0])
        c_i1 = n[0] + np.dot(self.mu, t)
        c_i2 = n[0] - np.dot(self.mu, t)
        l = c_i1.dot(betas[0, 0]) + c_i2.dot(betas[0, 1])
        lambdas = l

        for i in range(1, len(self.grasp_points)):
            n = grasp_normals_world_now.T[:, 0:
                                          2]  #row=contact point? transpose then index
            t = get_perpendicular2d(n[i])
            c_i1 = n[i] - np.dot(self.mu, t)
            c_i2 = n[i] + np.dot(self.mu, t)
            l = c_i1.dot(betas[i, 0]) + c_i2.dot(betas[i, 1])
            lambdas = np.vstack((lambdas, l))

        control_period = 0.0333
        # Constrain the dynamics
        dyn = M.dot(qdd) + C - B.dot(controls)
        for i in range(q.shape[0]):
            j_c = 0
            for l in range(len(self.grasp_points)):
                j_sub = J_contact[:, l, :].T
                j_c += j_sub.dot(lambdas[l])


#             print(j_c.shape)
#             print(dyn.shape)
            mp.AddConstraint(dyn[i] - j_c[i] == 0)

        #PD controller using kinematics
        Kp = 100.0
        Kd = 1.0
        control_period = 0.0333
        next_q = q + v.dot(control_period) + np.dot(qdd.dot(control_period**2),
                                                    .5)
        next_q_dot = v + qdd.dot(control_period)
        mp.AddQuadraticCost(Kp * (qdes - next_q).dot((qdes - next_q).T) + Kd *
                            (next_q_dot).dot(next_q_dot.T))
        Kp_ext = 100.
        mp.AddQuadraticCost(Kp_ext * (qdes - next_q)[-3:].dot(
            (qdes - next_q)[-3:].T))

        res = Solve(mp)
        c = res.GetSolution(controls)
        return c
コード例 #26
0
	L = np.linalg.cholesky(H)
	X = np.tile(xmin, vertices) + np.linalg.inv(np.transpose(L)).dot(Y)

	return ax.fill(X[0, :]+x0[0], X[1, :]+x0[1], color=color)
		
prog = MathematicalProgram()
x    = prog.NewIndeterminates(2, 'x')  
V1   = prog.NewSosPolynomial(Variables(x), 2)[0].ToExpression()
V2   = prog.NewSosPolynomial(Variables(x), 2)[0].ToExpression()

a = 0.5*Jacobian(V1.Jacobian(x),x) 
b = 0.5*Jacobian(V2.Jacobian(x),x) 

pxi = np.array([[-0.5,-0.5], [-0.5, 0.5], [0.5,0.5], [0.5,-0.5]])
for pt in pxi:
	prog.AddConstraint( pt.T.dot(a).dot(pt) <= 1.0)
	prog.AddConstraint( pt.T.dot(b).dot(pt) <= 1.0)
pxi = np.array([[0.0, 1.0] ])
prog.AddConstraint( pxi[-1].T.dot(b).dot(pxi[-1]) <= 1.0)

prog.AddMaximizeLogDeterminantSymmetricMatrixCost(a)
prog.AddMaximizeLogDeterminantSymmetricMatrixCost(b)

result = Solve(prog)
assert(result.is_success())
V1 = result.GetSolution(V1)
V2 = result.GetSolution(V2)

fig1, ax1 = plt.subplots()
ax1.set_xlim([-5.0, 5.0])
ax1.set_ylim([-5.0, 5.0])
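
For a quadratic form V(x) = x'Ax with A positive definite, the sublevel set {x : V(x) <= 1} is an ellipse with area pi / sqrt(det A), so maximizing log det A while the constraints pt'A pt <= 1 keep the listed points inside yields, roughly, the smallest origin-centered ellipse of that form containing them. A quick numeric illustration with an arbitrary positive definite A:

import numpy as np

A = np.array([[2.0, 0.3],
              [0.3, 1.0]])
area = np.pi / np.sqrt(np.linalg.det(A))   # area of {x : x' A x <= 1}
pt = np.array([-0.5, -0.5])
print("ellipse area:", area, "corner inside:", pt @ A @ pt <= 1.0)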
コード例 #27
0
    def static_controller(self, qref, verbose=False):
        """ 
        Generates a controller to maintain a static pose
        
        Arguments:
            qref: (N,) numpy array, the static pose to be maintained

        Return Values:
            u: (M,) numpy array, actuations to best achieve static pose
            f: (C,) numpy array, associated normal reaction forces

        static_controller generates the actuations and reaction forces assuming the velocity and accelerations are zero. Thus, the equation to solve is:
            N(q) = B*u + J*f
        where N is a vector of gravitational and conservative generalized forces, B is the actuation selection matrix, and J is the contact-Jacobian transpose.

        Currently, static_controller only considers the effects of the normal forces. Frictional forces are not yet supported
        """
        #TODO: Solve for friction forces as well

        # Check inputs
        if qref.ndim > 1 or qref.shape[0] != self.multibody.num_positions():
            raise ValueError(
                f"Reference position mut be ({self.multibody.num_positions(),},) array"
            )
        # Set the context
        context = self.multibody.CreateDefaultContext()
        self.multibody.SetPositions(context, qref)
        # Get the necessary properties
        G = self.multibody.CalcGravityGeneralizedForces(context)
        Jn, _ = self.GetContactJacobians(context)
        phi = self.GetNormalDistances(context)
        B = self.multibody.MakeActuationMatrix()
        #Collect terms
        A = np.concatenate([B, Jn.transpose()], axis=1)
        # Create the mathematical program
        prog = MathematicalProgram()
        l_var = prog.NewContinuousVariables(self.num_contacts(), name="forces")
        u_var = prog.NewContinuousVariables(self.multibody.num_actuators(),
                                            name="controls")
        # Ensure dynamics approximately satisfied
        prog.AddL2NormCost(A=A,
                           b=-G,
                           vars=np.concatenate([u_var, l_var], axis=0))
        # Enforce normal complementarity
        prog.AddBoundingBoxConstraint(np.zeros(l_var.shape),
                                      np.full(l_var.shape, np.inf), l_var)
        prog.AddConstraint(phi.dot(l_var) == 0)
        # Solve
        result = Solve(prog)
        # Check for a solution
        if result.is_success():
            u = result.GetSolution(u_var)
            f = result.GetSolution(l_var)
            if verbose:
                printProgramReport(result, prog)
            return (u, f)
        else:
            print(f"Optimization failed. Returning zeros")
            if verbose:
                printProgramReport(result, prog)
            return (np.zeros(u_var.shape), np.zeros(l_var.shape))
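The docstring above describes a least-squares fit of the static force balance plus a normal-force complementarity condition. A minimal standalone sketch of that pattern, with small made-up matrices standing in for B, the contact Jacobian, and gravity (all numbers below are illustrative, not taken from any model):

import numpy as np
from pydrake.all import MathematicalProgram, Solve

# Toy data: 3 generalized coordinates, 2 actuators, 2 contacts (all made up).
B = np.array([[1.0, 0.0],
              [0.0, 1.0],
              [0.0, 0.0]])            # actuation selection matrix
JnT = np.array([[0.0, 0.0],
                [0.0, 0.0],
                [1.0, 1.0]])          # contact-Jacobian transpose (normals only)
G = np.array([0.0, 0.0, -9.81])       # gravity generalized forces
phi = np.array([0.0, 0.1])            # first contact touching, second separated

A = np.concatenate([B, JnT], axis=1)

prog = MathematicalProgram()
u = prog.NewContinuousVariables(2, "controls")
lam = prog.NewContinuousVariables(2, "forces")

# Least-squares fit of the static balance B*u + Jn'*lam = -G
prog.AddL2NormCost(A=A, b=-G, vars=np.concatenate([u, lam]))
# Normal forces are non-negative and vanish at separated contacts
prog.AddBoundingBoxConstraint(0.0, np.inf, lam)
prog.AddConstraint(phi.dot(lam) == 0)

result = Solve(prog)
print(result.GetSolution(u), result.GetSolution(lam))

Here the complementarity constraint forces the separated contact's force to zero, so the least-squares cost assigns the full gravity load to the touching contact.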
Code example #28
def solveOptimization(state_init,
                      t_impact,
                      impact_combination,
                      T,
                      u_guess=None,
                      x_guess=None,
                      h_guess=None):
    prog = MathematicalProgram()
    h = prog.NewContinuousVariables(T, name='h')
    u = prog.NewContinuousVariables(rows=T + 1,
                                    cols=2 * n_quadrotors,
                                    name='u')
    x = prog.NewContinuousVariables(rows=T + 1,
                                    cols=6 * n_quadrotors + 4 * n_balls,
                                    name='x')
    dv = prog.decision_variables()

    prog.AddBoundingBoxConstraint([h_min] * T, [h_max] * T, h)

    for i in range(n_quadrotors):
        sys = Quadrotor2D()
        context = sys.CreateDefaultContext()
        dir_coll_constr = DirectCollocationConstraint(sys, context)
        ind_x = 6 * i
        ind_u = 2 * i

        for t in range(T):
            impact_indices = impact_combination[np.argmax(
                np.abs(t - t_impact) <= 1)]
            quad_ind, ball_ind = impact_indices[0], impact_indices[1]

            if quad_ind == i and np.any(
                    t == t_impact
            ):  # Don't add Direct collocation constraint at impact
                continue
            elif quad_ind == i and (np.any(t == t_impact - 1)
                                    or np.any(t == t_impact + 1)):
                prog.AddConstraint(
                    eq(
                        x[t + 1,
                          ind_x:ind_x + 3], x[t, ind_x:ind_x + 3] + h[t] *
                        x[t + 1, ind_x + 3:ind_x + 6]))  # Backward euler
                prog.AddConstraint(
                    eq(x[t + 1, ind_x + 3:ind_x + 6], x[t,
                                                        ind_x + 3:ind_x + 6])
                )  # Zero-acceleration assumption during this time step. Should maybe replace with something less naive
            else:
                AddDirectCollocationConstraint(
                    dir_coll_constr, np.array([[h[t]]]),
                    x[t, ind_x:ind_x + 6].reshape(-1, 1),
                    x[t + 1, ind_x:ind_x + 6].reshape(-1, 1),
                    u[t, ind_u:ind_u + 2].reshape(-1, 1),
                    u[t + 1, ind_u:ind_u + 2].reshape(-1, 1), prog)

    for i in range(n_balls):
        sys = Ball2D()
        context = sys.CreateDefaultContext()
        dir_coll_constr = DirectCollocationConstraint(sys, context)
        ind_x = 6 * n_quadrotors + 4 * i

        for t in range(T):
            impact_indices = impact_combination[np.argmax(
                np.abs(t - t_impact) <= 1)]
            quad_ind, ball_ind = impact_indices[0], impact_indices[1]

            if ball_ind == i and np.any(
                    t == t_impact
            ):  # Don't add Direct collocation constraint at impact
                continue
            elif ball_ind == i and (np.any(t == t_impact - 1)
                                    or np.any(t == t_impact + 1)):
                prog.AddConstraint(
                    eq(
                        x[t + 1,
                          ind_x:ind_x + 2], x[t, ind_x:ind_x + 2] + h[t] *
                        x[t + 1, ind_x + 2:ind_x + 4]))  # Backward euler
                prog.AddConstraint(
                    eq(x[t + 1,
                         ind_x + 2:ind_x + 4], x[t, ind_x + 2:ind_x + 4] +
                       h[t] * np.array([0, -9.81])))
            else:
                # Ball2D has no control inputs, so pass empty input slices.
                AddDirectCollocationConstraint(
                    dir_coll_constr, np.array([[h[t]]]),
                    x[t, ind_x:ind_x + 4].reshape(-1, 1),
                    x[t + 1, ind_x:ind_x + 4].reshape(-1, 1),
                    u[t, 0:0].reshape(-1, 1),
                    u[t + 1, 0:0].reshape(-1, 1), prog)

    # Initial conditions
    prog.AddLinearConstraint(eq(x[0, :], state_init))

    # Final conditions (hard-coded 0:14 span; redundant with the per-body
    # constraints below when n_quadrotors == 2 and n_balls == 1)
    prog.AddLinearConstraint(eq(x[T, 0:14], state_final[0:14]))
    # Quadrotor final conditions (full state)
    for i in range(n_quadrotors):
        ind = 6 * i
        prog.AddLinearConstraint(
            eq(x[T, ind:ind + 6], state_final[ind:ind + 6]))

    # Ball final conditions (position only)
    for i in range(n_balls):
        ind = 6 * n_quadrotors + 4 * i
        prog.AddLinearConstraint(
            eq(x[T, ind:ind + 2], state_final[ind:ind + 2]))

    # Input constraints
    for i in range(n_quadrotors):
        prog.AddLinearConstraint(ge(u[:, 2 * i], -20.0))
        prog.AddLinearConstraint(le(u[:, 2 * i], 20.0))
        prog.AddLinearConstraint(ge(u[:, 2 * i + 1], -20.0))
        prog.AddLinearConstraint(le(u[:, 2 * i + 1], 20.0))

    # Don't allow quadrotor to pitch more than 60 degrees
    for i in range(n_quadrotors):
        prog.AddLinearConstraint(ge(x[:, 6 * i + 2], -np.pi / 3))
        prog.AddLinearConstraint(le(x[:, 6 * i + 2], np.pi / 3))

    # Ball position constraints
    # for i in range(n_balls):
    #     ind_i = 6*n_quadrotors + 4*i
    #     prog.AddLinearConstraint(ge(x[:,ind_i],-2.0))
    #     prog.AddLinearConstraint(le(x[:,ind_i], 2.0))
    #     prog.AddLinearConstraint(ge(x[:,ind_i+1],-3.0))
    #     prog.AddLinearConstraint(le(x[:,ind_i+1], 3.0))

    # Impact constraint
    quad_temp = Quadrotor2D()

    for i in range(n_quadrotors):
        for j in range(n_balls):
            ind_q = 6 * i
            ind_b = 6 * n_quadrotors + 4 * j
            for t in range(T):
                if np.any(
                        t == t_impact
                ):  # If quad i and ball j impact at time t, add impact constraint
                    impact_indices = impact_combination[np.argmax(
                        t == t_impact)]
                    quad_ind, ball_ind = impact_indices[0], impact_indices[1]
                    if quad_ind == i and ball_ind == j:
                        # At impact, witness function == 0
                        prog.AddConstraint(lambda a: np.array([
                            CalcClosestDistanceQuadBall(a[0:3], a[3:5])
                        ]).reshape(1, 1),
                                           lb=np.zeros((1, 1)),
                                           ub=np.zeros((1, 1)),
                                           vars=np.concatenate(
                                               (x[t, ind_q:ind_q + 3],
                                                x[t,
                                                  ind_b:ind_b + 2])).reshape(
                                                      -1, 1))
                        # At impact, enforce discrete collision update for both ball and quadrotor
                        prog.AddConstraint(
                            CalcPostCollisionStateQuadBallResidual,
                            lb=np.zeros((6, 1)),
                            ub=np.zeros((6, 1)),
                            vars=np.concatenate(
                                (x[t, ind_q:ind_q + 6], x[t, ind_b:ind_b + 4],
                                 x[t + 1, ind_q:ind_q + 6])).reshape(-1, 1))
                        prog.AddConstraint(
                            CalcPostCollisionStateBallQuadResidual,
                            lb=np.zeros((4, 1)),
                            ub=np.zeros((4, 1)),
                            vars=np.concatenate(
                                (x[t, ind_q:ind_q + 6], x[t, ind_b:ind_b + 4],
                                 x[t + 1, ind_b:ind_b + 4])).reshape(-1, 1))

                        # rough constraints to enforce hitting center-ish of paddle
                        prog.AddLinearConstraint(
                            x[t, ind_q] - x[t, ind_b] >= -0.01)
                        prog.AddLinearConstraint(
                            x[t, ind_q] - x[t, ind_b] <= 0.01)
                        continue
                # Everywhere else, witness function must be > 0
                # Bind ind_q and ind_b as default arguments so the callback
                # captures the current loop values (a bare closure would see
                # only the final values of i and j when the solver calls it).
                prog.AddConstraint(lambda a, ind_q=ind_q, ind_b=ind_b: np.array([
                    CalcClosestDistanceQuadBall(a[ind_q:ind_q + 3],
                                                a[ind_b:ind_b + 2])
                ]).reshape(1, 1),
                                   lb=np.zeros((1, 1)),
                                   ub=np.inf * np.ones((1, 1)),
                                   vars=x[t, :].reshape(-1, 1))

    # Don't allow quadrotor collisions
    # for t in range(T):
    #     for i in range(n_quadrotors):
    #         for j in range(i+1, n_quadrotors):
    #             prog.AddConstraint((x[t,6*i]-x[t,6*j])**2 + (x[t,6*i+1]-x[t,6*j+1])**2 >= 0.65**2)

    # Quadrotors stay on their own side
    # prog.AddLinearConstraint(ge(x[:, 0], 0.3))
    # prog.AddLinearConstraint(le(x[:, 6], -0.3))

    ###############################################################################
    # Set up initial guesses
    initial_guess = np.empty(prog.num_vars())

    # initial guess for the time steps
    prog.SetDecisionVariableValueInVector(h, h_guess, initial_guess)

    # initial guess for the state trajectory; pin the first knot to the initial state
    x_guess[0, :] = state_init
    prog.SetDecisionVariableValueInVector(x, x_guess, initial_guess)

    # initial guess for the inputs
    prog.SetDecisionVariableValueInVector(u, u_guess, initial_guess)

    solver = SnoptSolver()
    print("Solving...")
    result = solver.Solve(prog, initial_guess)

    # print(GetInfeasibleConstraints(prog,result))
    # be sure that the solution is optimal
    assert result.is_success()

    print(f'Solution found? {result.is_success()}.')

    #################################################################################
    # Extract results
    # get optimal solution
    h_opt = result.GetSolution(h)
    x_opt = result.GetSolution(x)
    u_opt = result.GetSolution(u)
    time_breaks_opt = np.array([sum(h_opt[:t]) for t in range(T + 1)])
    u_opt_poly = PiecewisePolynomial.ZeroOrderHold(time_breaks_opt, u_opt.T)
    # x_opt_poly = PiecewisePolynomial.Cubic(time_breaks_opt, x_opt.T, False)
    x_opt_poly = PiecewisePolynomial.FirstOrderHold(
        time_breaks_opt, x_opt.T
    )  # Switch to first order hold instead of cubic because cubic was taking too long to create
    #################################################################################
    # Create list of K matrices for time varying LQR
    context = quad_plant.CreateDefaultContext()
    breaks = copy.copy(
        time_breaks_opt)  #np.linspace(0, x_opt_poly.end_time(), 100)

    K_samples = np.zeros((breaks.size, 12 * n_quadrotors))

    for i in range(n_quadrotors):
        K = None
        for j in range(breaks.size):
            context.SetContinuousState(
                x_opt_poly.value(breaks[j])[6 * i:6 * (i + 1)])
            context.FixInputPort(
                0,
                u_opt_poly.value(breaks[j])[2 * i:2 * (i + 1)])
            linear_system = FirstOrderTaylorApproximation(quad_plant, context)
            A = linear_system.A()
            B = linear_system.B()
            try:
                K, _, _ = control.lqr(A, B, Q, R)
            except Exception:
                assert K is not None, "Failed to calculate initial K for quadrotor " + str(
                    i)
                print("Warning: Failed to calculate K at timestep", j,
                      "for quadrotor", i, ". Using K from previous timestep")

            K_samples[j, 12 * i:12 * (i + 1)] = K.reshape(-1)

    K_poly = PiecewisePolynomial.ZeroOrderHold(breaks, K_samples.T)

    return u_opt_poly, x_opt_poly, K_poly, h_opt
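The gains packed into K_poly above are meant to be unpacked at run time. A hedged sketch of how the returned polynomials might be used to compute a time-varying LQR command for quadrotor i (the function name and signature are my own, not from the original code):

import numpy as np

def tvlqr_command(t, x, i, u_opt_poly, x_opt_poly, K_poly):
    # Nominal state and input for quadrotor i at time t, sliced out of the
    # stacked trajectories produced by solveOptimization.
    x_nom = x_opt_poly.value(t)[6 * i:6 * (i + 1), 0]
    u_nom = u_opt_poly.value(t)[2 * i:2 * (i + 1), 0]
    # Each quadrotor's 2x6 gain is stored flattened in a 12-entry block.
    K = K_poly.value(t)[12 * i:12 * (i + 1), 0].reshape(2, 6)
    return u_nom - K.dot(x - x_nom)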
Code example #29
    def compute_trajectory_to_other_world(self, state_initial, minimum_time,
                                          maximum_time):
        '''
        Your mission is to implement this function.

        A successful implementation of this function will compute a dynamically feasible trajectory
        which satisfies these criteria:
            - Efficiently conserve fuel
            - Reach "orbit" of the far right world
            - Approximately obey the dynamic constraints
            - Begin at the state_initial provided
            - Take no more than maximum_time, no less than minimum_time

        The above are defined more precisely in the provided notebook.

        Please note there are three return args.

        :param state_initial: numpy array of length 4, see rocket_dynamics for documentation
        :param minimum_time: float, minimum time allowed for trajectory
        :param maximum_time: float, maximum time allowed for trajectory

        :return: three return args separated by commas:

            trajectory, input_trajectory, time_array

            trajectory: a 2d array with N rows and 4 columns. See simulate_states_over_time for more documentation.
            input_trajectory: a 2d array with N-1 rows and 2 columns. See simulate_states_over_time for more documentation.
            time_array: an array with N rows.

        '''

        # length of horizon (excluding init state)
        N = 50
        trajectory = np.zeros((N + 1, 4))
        input_trajectory = np.ones((N, 2)) * 10.0

        ### My implementation of Direct Transcription
        # (states and control are all decision vars using Euler integration)
        mp = MathematicalProgram()

        # let trajectory duration be a decision var
        total_time = mp.NewContinuousVariables(1, "total_time")
        dt = total_time[0] / N

        # create the control decision var (m*N) and state decision var (n*[N+1])
        idx = 0
        u_list = mp.NewContinuousVariables(2, "u_{}".format(idx))
        state_list = mp.NewContinuousVariables(4, "state_{}".format(idx))
        state_list = np.vstack(
            (state_list,
             mp.NewContinuousVariables(4, "state_{}".format(idx + 1))))
        for idx in range(1, N):
            u_list = np.vstack(
                (u_list, mp.NewContinuousVariables(2, "u_{}".format(idx))))
            state_list = np.vstack(
                (state_list,
                 mp.NewContinuousVariables(4, "state_{}".format(idx + 1))))

        ### Constraints
        # set init state as constraint on stage 0 decision vars
        for state_idx in range(4):
            mp.AddLinearConstraint(
                state_list[0, state_idx] == state_initial[state_idx])

        # interstage equality constraint on state vars via Euler integration
        # note: Direct Collocation could improve accuracy for same computation
        for idx in range(1, N + 1):
            state_new = state_list[idx - 1, :] + dt * self.rocket_dynamics(
                state_list[idx - 1, :], u_list[idx - 1, :])
            for state_idx in range(4):
                mp.AddConstraint(state_list[idx,
                                            state_idx] == state_new[state_idx])

        # constraint on time
        mp.AddLinearConstraint(total_time[0] <= maximum_time)
        mp.AddLinearConstraint(total_time[0] >= minimum_time)

        # constraint on final state distance (squared) to the second planet
        final_dist_to_world_2_sq = (
            self.world_2_position[0] - state_list[-1, 0])**2 + (
                self.world_2_position[1] - state_list[-1, 1])**2
        mp.AddConstraint(final_dist_to_world_2_sq <= 0.25)

        # constraint on final state speed (squared)
        final_speed_sq = state_list[-1, 2]**2 + state_list[-1, 3]**2
        mp.AddConstraint(final_speed_sq <= 1)

        ### Cost
        # equal cost on vertical/horizontal accels, weight shouldn't matter since this is the only cost
        mp.AddQuadraticCost(1 * u_list[:, 0].dot(u_list[:, 0]))
        mp.AddQuadraticCost(1 * u_list[:, 1].dot(u_list[:, 1]))

        ### Solve and parse
        result = Solve(mp)
        trajectory = result.GetSolution(state_list)
        input_trajectory = result.GetSolution(u_list)
        tf = result.GetSolution(total_time)
        time_array = np.linspace(0, tf[0], N + 1)

        print "optimization successful: ", result.is_success()
        print "total num decision vars (x: (N+1)*4, u: 2N, total_time: 1): {}".format(
            mp.num_vars())
        print "solver used: ", result.get_solver_id().name()
        print "optimal trajectory time: {:.2f} sec".format(tf[0])

        return trajectory, input_trajectory, time_array
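The same direct-transcription pattern (decision variables for every state and input, linked by Euler integration) can be written more compactly with 2-D variable arrays. A minimal sketch for a double integrator standing in for rocket_dynamics (all names and numbers below are illustrative):

import numpy as np
from pydrake.all import MathematicalProgram, Solve, eq

def double_integrator_dynamics(x, u):
    # state: [px, py, vx, vy], input: [ax, ay]
    return np.array([x[2], x[3], u[0], u[1]])

N, dt = 20, 0.1
mp = MathematicalProgram()
x = mp.NewContinuousVariables(rows=N + 1, cols=4, name="x")
u = mp.NewContinuousVariables(rows=N, cols=2, name="u")

# start at the origin, at rest
mp.AddLinearConstraint(eq(x[0], np.zeros(4)))
# forward-Euler interstage constraints
for k in range(N):
    x_next = x[k] + dt * double_integrator_dynamics(x[k], u[k])
    mp.AddConstraint(eq(x[k + 1], x_next))
# reach the target position (final velocity left free)
mp.AddLinearConstraint(eq(x[N, 0:2], np.array([1.0, 1.0])))
# minimize control effort
mp.AddQuadraticCost((u * u).sum())

result = Solve(mp)
assert result.is_success()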
Code example #30
imp = prog.NewContinuousVariables(nf, name='imp')

# generalized velocity after the heel strike
# (if "mirrored", this velocity must coincide with the
# initial velocity qd[0] to ensure periodicity)
qd_post = prog.NewContinuousVariables(nq, name='qd_post')

"""Here are part of the constraints of the optimization problem."""

# lower and upper bound on the time steps for all t
prog.AddBoundingBoxConstraint([h_min] * T, [h_max] * T, h)

# link the configurations, velocities, and accelerations
# using the implicit Euler method, https://en.wikipedia.org/wiki/Backward_Euler_method
for t in range(T):
    prog.AddConstraint(eq(q[t+1], q[t] + h[t] * qd[t+1]))
    prog.AddConstraint(eq(qd[t+1], qd[t] + h[t] * qdd[t]))

# manipulator equations for all t (implicit Euler)
for t in range(T):
    vars = np.concatenate((q[t+1], qd[t+1], qdd[t], f[t]))
    prog.AddConstraint(manipulator_equations, lb=[0]*nq, ub=[0]*nq, vars=vars)
    
# velocity reset across heel strike
# see http://underactuated.mit.edu/multibody.html#impulsive_collision
vars = np.concatenate((q[-1], qd[-1], qd_post, imp))
prog.AddConstraint(reset_velocity_heelstrike, lb=[0]*(nq+nf), ub=[0]*(nq+nf), vars=vars)
    
# mirror initial and final configuration
# see "The Walking Cycle" section of this notebook
prog.AddLinearConstraint(eq(q[0], - q[-1]))
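The constraint callbacks manipulator_equations and reset_velocity_heelstrike are not shown in this snippet. Judging from the bounds and the variable ordering above, manipulator_equations is presumably a residual of the manipulator equations M(q)*qdd + C(q, qd)*qd - tau_g(q) - Jc(q)'*f = 0. A rough sketch under that assumption, where plant_f/plant_ad, their contexts, and contact_jacobian are placeholders for objects such a notebook would define elsewhere:

import numpy as np

def manipulator_equations(vars):
    # vars = [q, qd, qdd, f], matching the concatenation used above
    q, qd, qdd, f = np.split(vars, [nq, 2 * nq, 3 * nq])
    # pick the float or autodiff copy of the plant, depending on how the
    # solver is evaluating the constraint
    plant, context = (plant_ad, context_ad) if vars.dtype == object else (plant_f, context_f)
    plant.SetPositions(context, q)
    plant.SetVelocities(context, qd)
    M = plant.CalcMassMatrixViaInverseDynamics(context)
    Cv = plant.CalcBiasTerm(context)
    tauG = plant.CalcGravityGeneralizedForces(context)
    Jc = contact_jacobian(plant, context)  # model-specific helper, assumed given
    # residual of M*qdd + C*qd - tau_g - Jc'*f = 0
    return M.dot(qdd) + Cv - tauG - Jc.T.dot(f)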