Example #1
    def SOS_compute_2(self, l_coeff, S, rho_max=10.):
        prog = MathematicalProgram()
        # fix V and lbda, search for u and rho
        x = prog.NewIndeterminates(2, "x")
        # get lbda from before
        l = np.array([x[1], x[0], 1])
        lbda = l.dot(np.dot(l_coeff, l))

        # Define u
        K = prog.NewContinuousVariables(2, "K")

        # Fixed Lyapunov
        V = x.dot(np.dot(S, x))
        Vdot = Jacobian([V], x).dot(self.dynamics_K(x, K))[0]

        # rho is decision variable now
        rho = prog.NewContinuousVariables(1, "rho")[0]

        prog.AddSosConstraint(-Vdot - lbda * (rho - V))

        prog.AddLinearConstraint(rho <= rho_max)
        prog.AddLinearCost(-rho)
        prog.Solve()
        rho = prog.GetSolution(rho)
        K = prog.GetSolution(K)
        return rho, K
Example #2
    def SOS_compute_3(self, K, l_coeff, rho_max=10.):
        prog = MathematicalProgram()
        # fix u and lbda, search for V and rho
        x = prog.NewIndeterminates(2, "x")

        # get lbda from before
        l = np.array([x[1], x[0], 1])
        lbda = l.dot(np.dot(l_coeff, l))

        # rho is decision variable now
        rho = prog.NewContinuousVariables(1, "rho")[0]

        # create lyap V
        s = prog.NewContinuousVariables(4, "s")
        S = np.array([[s[0], s[1]], [s[2], s[3]]])
        V = x.dot(np.dot(S, x))
        Vdot = Jacobian([V], x).dot(self.dynamics_K(x, K))[0]

        prog.AddSosConstraint(V)
        prog.AddSosConstraint(-Vdot - lbda * (rho - V))

        prog.AddLinearCost(-rho)
        prog.AddLinearConstraint(rho <= rho_max)

        prog.Solve()
        rho = prog.GetSolution(rho)
        s = prog.GetSolution(s)
        return s, rho
Example #3
    def intercepting_with_obs_avoidance(self,
                                        p0,
                                        v0,
                                        pf,
                                        vf,
                                        T,
                                        obstacles=None,
                                        p_puck=None):
        """Intercepting trajectory that avoids obs"""
        x0 = np.array(np.concatenate((p0, v0), axis=0))
        xf = np.concatenate((pf, vf), axis=0)
        prog = MathematicalProgram()

        # state and control inputs
        N = int(T / self.params.dt)  # number of time steps
        state = prog.NewContinuousVariables(N + 1, 4, 'state')
        cmd = prog.NewContinuousVariables(N, 2, 'input')

        # Initial and final state
        prog.AddLinearConstraint(eq(state[0], x0))
        #prog.AddLinearConstraint(eq(state[-1], xf))
        prog.AddQuadraticErrorCost(Q=10.0 * np.eye(4),
                                   x_desired=xf,
                                   vars=state[-1])

        self.add_dynamics(prog, N, state, cmd)

        ## Input saturation
        self.add_input_limits(prog, cmd, N)

        # Arena constraints
        self.add_arena_limits(prog, state, N)

        for k in range(N):
            prog.AddQuadraticCost(cmd[k].flatten().dot(cmd[k]))

        # Add non-linear constraints - will solve with SNOPT
        # Avoid other players
        if obstacles is not None:
            for obs in obstacles:
                self.avoid_other_player_nl(prog, state, obs, N)

        # avoid hitting the puck while generating a kicking trajectory
        if not p_puck.any(None):
            self.avoid_puck_nl(prog, state, N, p_puck)

        solver = SnoptSolver()
        result = solver.Solve(prog)
        solution_found = result.is_success()
        if not solution_found:
            print("Solution not found for intercepting_with_obs_avoidance")

        u_values = result.GetSolution(cmd)
        return solution_found, u_values.T
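The helper methods called above (add_dynamics, add_input_limits, add_arena_limits, avoid_other_player_nl, avoid_puck_nl) are not included in this snippet. As a rough sketch only, assuming the state is [px, py, vx, vy], the input is a planar acceleration command, and the same eq helper used above, add_dynamics might impose forward-Euler dynamics like this:

    def add_dynamics(self, prog, N, state, cmd):
        """Hypothetical forward-Euler double-integrator dynamics (sketch, not the original helper)."""
        dt = self.params.dt
        for k in range(N):
            # position integrates velocity; velocity integrates the commanded acceleration
            prog.AddLinearConstraint(eq(state[k + 1][:2], state[k][:2] + dt * state[k][2:]))
            prog.AddLinearConstraint(eq(state[k + 1][2:], state[k][2:] + dt * cmd[k]))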
Example #4
	def optimization(self, quad_plants, ball_plants, contexts):
		prog = MathematicalProgram()
		#The number of time steps likely requires a line search.
		#Find the number of time steps until a ball reaches a threshold so that a quadrotor can catch it.
		N = 200 #Number of time steps
		n_u = 2*len(quad_plants)
		n_x = 6*len(quad_plants) + 4*len(ball_plants)
		n_quads = len(quad_plants)
		n_balls = len(ball_plants)
		u = np.empty((self.n_u, N-1), dtype=Variable)
		x = np.empty((self.n_x, N), dtype=Variable)
		I =  0.00383
		r = 0.25
		mass = 0.486
		g = 9.81
		#Add all the decision variables
		for n in range(N-1):
			u[:,n] = prog.NewContinuousVariables(self.n_u, 'u' + str(n))
			x[:,n] = prog.NewContinuousVariables(self.n_x, 'x' + str(n))
		x[:,N-1] = prog.NewContinuousVariables(self.n_x, 'x' + str(N))

		#Start at the correct initial conditions
		#Connect quadrotor and ball state variables to the initial condition of mathprog
		x0 = np.empty((self.n_x,1))
		for k in range(n_quads + n_balls):
			if k < n_quads:
				x0[6*k:6*k+5] = self.quad_plants[k] #???
		prog.AddBoundingBoxConstraint(x0, x0, x[:,0])
		for n in range(N-1):
			for k in range(n_quads + n_balls):
				#Handles no collisions
				if k < n_quads:
					#Quad dynamic constraints
					dynamics_constraint_vel = #?
					dynamics_constraint_acc = #?
					prog.AddConstraint(dynamics_constraint_vel[0, 0])
					prog.AddConstraint(dynamics_constraint_vel[1, 0])
					prog.AddConstraint(dynamics_constraint_vel[2, 0])
					prog.AddConstraint(dynamics_constraint_acc[0, 0])
					prog.AddConstraint(dynamics_constraint_acc[1, 0])
					prog.AddConstraint(dynamics_constraint_acc[2, 0])
				else:
					#Ball dynamic constraints
					dynamics_constraint_vel = #
					dynamics_constraint_acc = #
					prog.AddConstraint(dynamics_constraint_vel[0, 0])
					prog.AddConstraint(dynamics_constraint_vel[1, 0])
					prog.AddConstraint(dynamics_constraint_acc[0, 0])
					prog.AddConstraint(dynamics_constraint_acc[1, 0])
Example #5
def initialize_problem(n_x, n_u, N, x0):
    ''' returns base MathematicalProgram object and empty decision vars '''
    prog = MathematicalProgram()

    # Decision Variables
    x = prog.NewContinuousVariables(n_x, N, 'x')
    u = prog.NewContinuousVariables(n_u, N-1, 'u')
    z = []  # placeholder for obstacle binary variables
    q = []  # placeholder for obstacle slack buffer variables

    # initial conditions constraints
    prog.AddBoundingBoxConstraint(x0, x0, x[:, 0])

    decision_vars = [x, u, z, q]
    return prog, decision_vars
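A minimal usage sketch for initialize_problem; the dimensions and names below are illustrative, not from the original source:

import numpy as np

# 4 states, 2 inputs, 20 knot points, starting at the origin
prog, (x, u, z, q) = initialize_problem(n_x=4, n_u=2, N=20, x0=np.zeros(4))
# dynamics, obstacle, and cost terms would be added to prog here before calling Solve(prog)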
Example #6
		def CheckLevelSet(self, prev_x, x0, Vs, Vsdot, rho, multiplier_degree):
			prog = MathematicalProgram()
			x = prog.NewIndeterminates(len(prev_x),'x')
			V = Vs.Substitute(dict(zip(prev_x, x)))
			Vdot = Vsdot.Substitute(dict(zip(prev_x, x)))
			slack = prog.NewContinuousVariables(1,'a')[0]  
			#mapping = dict(zip(x, np.ones(len(x))))
			#V_norm = 0.0*V
			#for i in range(len(x)):
			#	basis = np.ones(len(x))
			#	V_norm = V_norm + V.Substitute(dict(zip(x, basis)))
			#V = V/V_norm
			#Vdot = Vdot/V_norm
			#prog.AddConstraint(V_norm == 0)

			# Lagrange multiplier in the relative state, lambda(xbar)
			Lambda = prog.NewSosPolynomial(Variables(x), multiplier_degree)[0].ToExpression()
			Lambda = Lambda.Substitute(dict(zip(x, x-x0))) # switch to the relative state xbar = x - x0
			prog.AddSosConstraint(-Vdot + Lambda*(V - rho) - slack*V)
			prog.AddCost(-slack)
			#import pdb; pdb.set_trace()
			result = Solve(prog)
			if(not result.is_success()):
				print('%s, %s' %(result.get_solver_id().name(),result.get_solution_result()) )
				print('slack = %f' %(result.GetSolution(slack)) )
				print('Rho = %f' %(rho))
				#assert result.is_success()
				return -1.0
			
			return result.GetSolution(slack)
Example #7
    def SOS_compute_1(self, S, rho_prev):
        # fix V and rho, search for L and u
        prog = MathematicalProgram()
        x = prog.NewIndeterminates(2, "x")

        # Define u
        K = prog.NewContinuousVariables(2, "K")

        # Fixed Lyapunov
        V = x.dot(np.dot(S, x))
        Vdot = Jacobian([V], x).dot(self.dynamics_K(x, K))[0]

        # Define the Lagrange multipliers.
        (lambda_, constraint) = prog.NewSosPolynomial(Variables(x), 2)
        prog.AddLinearConstraint(K[0] * x[0] <= 2.5)
        prog.AddSosConstraint(-Vdot - lambda_.ToExpression() * (rho_prev - V))

        result = prog.Solve()
        # print(lambda_.ToExpression())
        # print(lambda_.decision_variables())
        lc = [prog.GetSolution(var) for var in lambda_.decision_variables()]
        lbda_coeff = np.ones([3, 3])
        lbda_coeff[0, 0] = lc[0]
        lbda_coeff[0, 1] = lbda_coeff[1, 0] = lc[1]
        lbda_coeff[2, 0] = lbda_coeff[0, 2] = lc[2]
        lbda_coeff[1, 1] = lc[3]
        lbda_coeff[2, 1] = lbda_coeff[1, 2] = lc[4]
        lbda_coeff[2, 2] = lc[5]
        return lbda_coeff
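Examples #1, #2 and #7 read like the pieces of a bilinear alternation for region-of-attraction estimation. A hypothetical outer loop (not part of the original code), assuming the class exposes the three methods exactly as shown, could look like:

    def alternation_sketch(self, S0, rho0=1.0, iters=10):
        # Sketch only: alternate the fixed-V and fixed-K SOS programs until rho settles.
        S, rho = S0, rho0
        for _ in range(iters):
            l_coeff = self.SOS_compute_1(S, rho)     # fix V and rho, solve for the multiplier
            rho, K = self.SOS_compute_2(l_coeff, S)  # fix V and multiplier, maximize rho over K
            s, rho = self.SOS_compute_3(K, l_coeff)  # fix K and multiplier, search for a new V
            S = np.array([[s[0], s[1]], [s[2], s[3]]])
        return S, K, rho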
Example #8
def run_simple_mathematical_program():
    print "\n\nsimple mathematical program"
    mp = MathematicalProgram()
    x = mp.NewContinuousVariables(1, "x")
    mp.AddLinearCost(x[0] * 1.0)
    mp.AddLinearConstraint(x[0] >= 1)
    print mp.Solve()
    print mp.GetSolution(x)
    print "(finished) simple mathematical program"
Example #9
def solve_lcp(P, q):
    prog = MathematicalProgram()
    x = prog.NewContinuousVariables(q.size)
    prog.AddLinearComplementarityConstraint(P, q, x)
    result = Solve(prog)

    status = result.is_success()
    z = result.GetSolution(x)
    return (z, status)
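A small usage sketch: the linear complementarity problem asks for x >= 0 with P x + q >= 0 and x'(P x + q) = 0; with a positive definite P the solution is unique. The values below are illustrative:

import numpy as np

P = np.eye(2)
q = np.array([-1.0, 2.0])
z, status = solve_lcp(P, q)
# expected: status is True and z is approximately [1, 0],
# since then P z + q = [0, 2] >= 0 and z'(P z + q) = 0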
Example #10
def run_complex_mathematical_program():
    print "\n\ncomplex mathematical program"
    mp = MathematicalProgram()
    x = mp.NewContinuousVariables(1, 'x')
    mp.AddCost(cost, x)
    mp.AddConstraint(quad_constraint, [1.0], [2.0], x)
    mp.SetInitialGuess(x, [1.1])
    print mp.Solve()
    res = mp.GetSolution(x)
    print res

    print "(finished) complex mathematical program"
Example #11
    def test_create_constraint_input_array(self):
        prog = MathematicalProgram()
        q = prog.NewContinuousVariables(1, 'q')
        r = prog.NewContinuousVariables(rows=2, cols=3, name='r')
        q_val = 0.5
        r_val = np.arange(6).reshape((2,3))
        '''
        array([[0, 1, 2],
               [3, 4, 5]])
        '''

        constraint = prog.AddConstraint(le(q, r[0,0] + 2*r[1,1]))
        input_array = create_constraint_input_array(constraint, {
            "q": q_val,
            "r": r_val
        })
        expected_input_array = [0.5, 0, 4]
        np.testing.assert_array_almost_equal(input_array, expected_input_array)

        z = prog.NewContinuousVariables(2, 'z')
        z_val = np.array([0.0, 0.5])
        # https://stackoverflow.com/questions/64736910/using-le-or-ge-with-scalar-left-hand-side-creates-unsized-formula-array
        # constraint = prog.AddConstraint(le(z[1], r[0,2] + 2*r[1,0]))
        constraint = prog.AddConstraint(le([z[1]], r[0,2] + 2*r[1,0]))
        input_array = create_constraint_input_array(constraint, {
            "z": z_val,
            "r": r_val
        })
        expected_input_array = [3, 2, 0.5]
        np.testing.assert_array_almost_equal(input_array, expected_input_array)

        a = prog.NewContinuousVariables(rows=2, cols=1, name='a')
        a_val = np.array([[1.0], [2.0]])
        constraint = prog.AddConstraint(eq(a[0], a[1]))
        input_array = create_constraint_input_array(constraint, {
            "a": a_val
        })
        expected_input_array = [1.0, 2.0]
        np.testing.assert_array_almost_equal(input_array, expected_input_array)
Example #12
			def CheckLevelSet(prev_x, prev_V, prev_Vdot, rho, multiplier_degree):
				prog = MathematicalProgram()
				x = prog.NewIndeterminates(len(prev_x),'x')
				V = prev_V.Substitute(dict(zip(prev_x, x)))
				Vdot = prev_Vdot.Substitute(dict(zip(prev_x, x)))
				slack = prog.NewContinuousVariables(1)[0]

				Lambda = prog.NewSosPolynomial(Variables(x), multiplier_degree)[0].ToExpression()

				prog.AddSosConstraint(-Vdot + Lambda*(V - rho) - slack*V)
				prog.AddCost(-slack)

				result = Solve(prog)
				assert result.is_success()
				return result.GetSolution(slack)
Example #13
    def CheckLevelSet(prev_x, prev_V, prev_Vdot, rho, multiplier_degree):
        #import pdb; pdb.set_trace()
        prog = MathematicalProgram()
        x = prog.NewIndeterminates(len(prev_x),'x')
        V = prev_V.Substitute(dict(zip(prev_x, x)))
        Vdot = prev_Vdot.Substitute(dict(zip(prev_x, x)))
        slack = prog.NewContinuousVariables(1,'a')[0]

        Lambda = prog.NewSosPolynomial(Variables(x), multiplier_degree)[0].ToExpression()
        #print('V degree: %d' %(Polynomial(V).TotalDegree()))
        #print('Vdot degree: %d' %(Polynomial(Vdot).TotalDegree()))
        #print('SOS degree: %d' %(Polynomial(-Vdot + Lambda*(V - rho) - slack*V).TotalDegree()))
        prog.AddSosConstraint(-Vdot + Lambda*(V - rho) - slack*V)
        prog.AddCost(-slack)

        result = Solve(prog)
        #assert result.is_success()
        return result.GetSolution(slack)
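Both CheckLevelSet variants maximize a slack such that -Vdot + lambda*(V - rho) - slack*V is SOS, so a non-negative optimal slack certifies the sub-level set V <= rho. A hypothetical bisection on rho (not part of the original code) could use the returned slack like this:

    def find_max_rho(prev_x, V, Vdot, rho_max=10.0, multiplier_degree=4, tol=1e-3):
        # Sketch only: keep the largest rho whose level set is verified by CheckLevelSet.
        lo, hi = 0.0, rho_max
        while hi - lo > tol:
            mid = 0.5 * (lo + hi)
            if CheckLevelSet(prev_x, V, Vdot, mid, multiplier_degree) >= 0:
                lo = mid  # verified, try a larger level set
            else:
                hi = mid  # verification failed, shrink
        return lo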
Example #14
def test_Feedback_Linearization_controller_BS(x, t):
    # Output feedback linearization 2
    #
    # y1= ||r-rf||^2/2
    # y2= wx
    # y3= wy
    # y4= wz
    #
    # Analysis: The zero dynamics is unstable.
    global g, xf, Aout, Bout, Mout, T_period, w_freq, radius

    print("%%%%%%%%%%%%%%%%%%%%%")
    print("%%CLF-QP  %%%%%%%%%%%")
    print("%%%%%%%%%%%%%%%%%%%%%")

    print("t:", t)
    prog = MathematicalProgram()
    u_var = prog.NewContinuousVariables(4, "u_var")
    c_var = prog.NewContinuousVariables(1, "c_var")

    # # # Example 1 : Circle

    x_f = radius * math.cos(w_freq * t)
    y_f = radius * math.sin(w_freq * t)
    # print("x_f:",x_f)
    # print("y_f:",y_f)
    dx_f = -radius * math.pow(w_freq, 1) * math.sin(w_freq * t)
    dy_f = radius * math.pow(w_freq, 1) * math.cos(w_freq * t)
    ddx_f = -radius * math.pow(w_freq, 2) * math.cos(w_freq * t)
    ddy_f = -radius * math.pow(w_freq, 2) * math.sin(w_freq * t)
    dddx_f = radius * math.pow(w_freq, 3) * math.sin(w_freq * t)
    dddy_f = -radius * math.pow(w_freq, 3) * math.cos(w_freq * t)
    ddddx_f = radius * math.pow(w_freq, 4) * math.cos(w_freq * t)
    ddddy_f = radius * math.pow(w_freq, 4) * math.sin(w_freq * t)

    # # Example 2 : Lissajous curve a=1 b=2
    # ratio_ab=2;
    # a=1;
    # b=ratio_ab*a;
    # delta_lissajous = 0 #math.pi/2;

    # x_f = radius*math.sin(a*w_freq*t+delta_lissajous)
    # y_f = radius*math.sin(b*w_freq*t)
    # # print("x_f:",x_f)
    # # print("y_f:",y_f)
    # dx_f = radius*math.pow(a*w_freq,1)*math.cos(a*w_freq*t+delta_lissajous)
    # dy_f = radius*math.pow(b*w_freq,1)*math.cos(b*w_freq*t)
    # ddx_f = -radius*math.pow(a*w_freq,2)*math.sin(a*w_freq*t+delta_lissajous)
    # ddy_f = -radius*math.pow(b*w_freq,2)*math.sin(b*w_freq*t)
    # dddx_f = -radius*math.pow(a*w_freq,3)*math.cos(a*w_freq*t+delta_lissajous)
    # dddy_f = -radius*math.pow(b*w_freq,3)*math.cos(b*w_freq*t)
    # ddddx_f = radius*math.pow(a*w_freq,4)*math.sin(a*w_freq*t+delta_lissajous)
    # ddddy_f = radius*math.pow(b*w_freq,4)*math.sin(b*w_freq*t)

    e1 = np.array([1, 0, 0])  # e3 elementary vector
    e2 = np.array([0, 1, 0])  # e3 elementary vector
    e3 = np.array([0, 0, 1])  # e3 elementary vector

    # epsilonn= 1e-0
    # alpha = 100;
    # kp1234=alpha*1/math.pow(epsilonn,4) # gain for y
    # kd1=4/math.pow(epsilonn,3) # gain for y^(1)
    # kd2=12/math.pow(epsilonn,2) # gain for y^(2)
    # kd3=4/math.pow(epsilonn,1)  # gain for y^(3)

    # kp5= 10;                    # gain for y5

    q = np.zeros(7)
    qd = np.zeros(6)
    q = x[0:8]
    qd = x[8:15]

    print("qnorm:", np.dot(q[3:7], q[3:7]))

    q0 = q[3]
    q1 = q[4]
    q2 = q[5]
    q3 = q[6]
    xi1 = q[7]

    v = qd[0:3]
    w = qd[3:6]
    xi2 = qd[6]

    xd = xf[0]
    yd = xf[1]
    zd = xf[2]
    wd = xf[11:14]

    # Useful vectors and matrices

    (Rq, Eq, wIw, I_inv) = robobee_plantBS.GetManipulatorDynamics(q, qd)

    F1q = np.zeros((3, 4))
    F1q[0, :] = np.array([q2, q3, q0, q1])
    F1q[1, :] = np.array([-1 * q1, -1 * q0, q3, q2])
    F1q[2, :] = np.array([q0, -1 * q1, -1 * q2, q3])

    Rqe3 = np.dot(Rq, e3)
    Rqe3_hat = np.zeros((3, 3))
    Rqe3_hat[0, :] = np.array([0, -Rqe3[2], Rqe3[1]])
    Rqe3_hat[1, :] = np.array([Rqe3[2], 0, -Rqe3[0]])
    Rqe3_hat[2, :] = np.array([-Rqe3[1], Rqe3[0], 0])

    Rqe1 = np.dot(Rq, e1)
    Rqe1_hat = np.zeros((3, 3))
    Rqe1_hat[0, :] = np.array([0, -Rqe1[2], Rqe1[1]])
    Rqe1_hat[1, :] = np.array([Rqe1[2], 0, -Rqe1[0]])
    Rqe1_hat[2, :] = np.array([-Rqe1[1], Rqe1[0], 0])

    Rqe1_x = np.dot(e2.T, Rqe1)
    Rqe1_y = np.dot(e1.T, Rqe1)

    w_hat = np.zeros((3, 3))
    w_hat[0, :] = np.array([0, -w[2], w[1]])
    w_hat[1, :] = np.array([w[2], 0, -w[0]])
    w_hat[2, :] = np.array([-w[1], w[0], 0])

    Rw = np.dot(Rq, w)
    Rw_hat = np.zeros((3, 3))
    Rw_hat[0, :] = np.array([0, -Rw[2], Rw[1]])
    Rw_hat[1, :] = np.array([Rw[2], 0, -Rw[0]])
    Rw_hat[2, :] = np.array([-Rw[1], Rw[0], 0])
    #- Checking the derivation

    # print("F1qEqT-(-Rqe3_hat)",np.dot(F1q,Eq.T)-(-Rqe3_hat))
    # Rqe3_cal = np.zeros(3)
    # Rqe3_cal[0] = 2*(q3*q1+q0*q2)
    # Rqe3_cal[1] = 2*(q3*q2-q0*q1)
    # Rqe3_cal[2] = (q0*q0+q3*q3-q1*q1-q2*q2)

    # print("Rqe3 - Rqe3_cal", Rqe3-Rqe3_cal)

    # Four output
    y1 = x[0] - x_f
    y2 = x[1] - y_f
    y3 = x[2] - zd - 0
    y4 = math.atan2(Rqe1_x, Rqe1_y) - math.pi / 8
    # print("Rqe1_x:", Rqe1_x)

    eta1 = np.zeros(3)
    eta1 = np.array([y1, y2, y3])
    eta5 = y4

    # print("y4", y4)

    # First derivative of first three output and yaw output
    eta2 = np.zeros(3)
    eta2 = v - np.array([dx_f, dy_f, 0])
    dy1 = eta2[0]
    dy2 = eta2[1]
    dy3 = eta2[2]

    x2y2 = (math.pow(Rqe1_x, 2) + math.pow(Rqe1_y, 2))  # x^2+y^2

    eta6_temp = np.zeros(3)  #eta6_temp = (ye2T-xe1T)/(x^2+y^2)
    eta6_temp = (Rqe1_y * e2.T - Rqe1_x * e1.T) / x2y2
    # print("eta6_temp:", eta6_temp)
    # Body frame w  ( multiply R)
    eta6 = np.dot(eta6_temp, np.dot(-Rqe1_hat, np.dot(Rq, w)))

    # World frame w
    # eta6 = np.dot(eta6_temp,np.dot(-Rqe1_hat,w))
    # print("Rqe1_hat:", Rqe1_hat)

    # Second derivative of first three output
    eta3 = np.zeros(3)
    eta3 = -g * e3 + Rqe3 * xi1 - np.array([ddx_f, ddy_f, 0])
    ddy1 = eta3[0]
    ddy2 = eta3[1]
    ddy3 = eta3[2]

    # Third derivative of first three output
    eta4 = np.zeros(3)
    # Body frame w ( multiply R)
    eta4 = Rqe3 * xi2 + np.dot(-Rqe3_hat, np.dot(Rq, w)) * xi1 - np.array(
        [dddx_f, dddy_f, 0])

    # World frame w
    # eta4 = Rqe3*xi2+np.dot(np.dot(F1q,Eq.T),w)*xi1 - np.array([dddx_f,dddy_f,0])
    dddy1 = eta4[0]
    dddy2 = eta4[1]
    dddy3 = eta4[2]

    # Fourth derivative of first three output
    B_qw_temp = np.zeros(3)
    # Body frame w
    B_qw_temp = xi1 * (-np.dot(Rw_hat, np.dot(Rqe3_hat, Rw)) +
                       np.dot(Rqe3_hat, np.dot(Rq, np.dot(I_inv, wIw)))
                       )  # np.dot(I_inv,wIw)*xi1-2*w*xi2
    B_qw = B_qw_temp + xi2 * (-2 * np.dot(Rqe3_hat, Rw)) - np.array(
        [ddddx_f, ddddy_f, 0])  #np.dot(Rqe3_hat,B_qw_temp)

    # World frame w
    # B_qw_temp = xi1*(-np.dot(w_hat,np.dot(Rqe3_hat,w))+np.dot(Rqe3_hat,np.dot(I_inv,wIw))) # np.dot(I_inv,wIw)*xi1-2*w*xi2
    # B_qw      = B_qw_temp+xi2*(-2*np.dot(Rqe3_hat,w)) - np.array([ddddx_f,ddddy_f,0])   #np.dot(Rqe3_hat,B_qw_temp)

    # B_qw = B_qw_temp - np.dot(w_hat,np.dot(Rqe3_hat,w))*xi1

    # Second derivative of yaw output

    # Body frame w
    dRqe1_x = np.dot(e2, np.dot(-Rqe1_hat, Rw))  # \dot{x}
    dRqe1_y = np.dot(e1, np.dot(-Rqe1_hat, Rw))  # \dot{y}
    alpha1 = 2 * (Rqe1_x * dRqe1_x +
                  Rqe1_y * dRqe1_y) / x2y2  # (2xdx +2ydy)/(x^2+y^2)

    # World frame w
    # dRqe1_x = np.dot(e2,np.dot(-Rqe1_hat,w)) # \dot{x}
    # dRqe1_y = np.dot(e1,np.dot(-Rqe1_hat,w)) # \dot{y}

    # alpha1 = 2*(Rqe1_x*dRqe1_x+Rqe1_y*dRqe1_y)/x2y2 # (2xdx +2ydy)/(x^2+y^2)
    # # alpha2 = math.pow(dRqe1_y,2)-math.pow(dRqe1_x,2)

    # Body frame w

    B_yaw_temp3 = np.zeros(3)
    B_yaw_temp3 = alpha1 * np.dot(Rqe1_hat, Rw) + np.dot(
        Rqe1_hat, np.dot(Rq, np.dot(I_inv, wIw))) - np.dot(
            Rw_hat, np.dot(Rqe1_hat, Rw))

    B_yaw = np.dot(eta6_temp,
                   B_yaw_temp3)  # +alpha2 :Could be an error in math.
    g_yaw = np.zeros(3)
    g_yaw = -np.dot(eta6_temp, np.dot(Rqe1_hat, np.dot(Rq, I_inv)))

    # World frame w
    # B_yaw_temp3 =np.zeros(3)
    # B_yaw_temp3 = alpha1*np.dot(Rqe1_hat,w)+np.dot(Rqe1_hat,np.dot(I_inv,wIw))-np.dot(w_hat,np.dot(Rqe1_hat,w))

    # B_yaw = np.dot(eta6_temp,B_yaw_temp3) # +alpha2 :Could be an error in math.
    # g_yaw = np.zeros(3)
    # g_yaw = -np.dot(eta6_temp,np.dot(Rqe1_hat,I_inv))

    # print("g_yaw:", g_yaw)
    # Decoupling matrix A(x)\in\mathbb{R}^4

    A_fl = np.zeros((4, 4))
    A_fl[0:3, 0] = Rqe3
    # Body frame w
    A_fl[0:3, 1:4] = -np.dot(Rqe3_hat, np.dot(Rq, I_inv)) * xi1
    # World frame w
    # A_fl[0:3,1:4] = -np.dot(Rqe3_hat,I_inv)*xi1
    A_fl[3, 1:4] = g_yaw

    A_fl_inv = np.linalg.inv(A_fl)
    A_fl_det = np.linalg.det(A_fl)
    # print("I_inv:", I_inv)
    print("A_fl:", A_fl)
    print("A_fl_det:", A_fl_det)

    # Drift in the output dynamics

    U_temp = np.zeros(4)
    U_temp[0:3] = B_qw
    U_temp[3] = B_yaw

    # Output dynamics

    eta = np.hstack([eta1, eta2, eta3, eta5, eta4, eta6])
    eta_norm = np.dot(eta, eta)

    # v-CLF QP controller

    FP_PF = np.dot(Aout.T, Mout) + np.dot(Mout, Aout)
    PG = np.dot(Mout, Bout)

    L_FVx = np.dot(eta, np.dot(FP_PF, eta))
    L_GVx = 2 * np.dot(eta.T, PG)  # row vector
    L_fhx_star = U_temp

    Vx = np.dot(eta, np.dot(Mout, eta))
    # phi0_exp = L_FVx+np.dot(L_GVx,L_fhx_star)+(min_e_Q/max_e_P)*Vx*1    # exponentially stabilizing
    # phi0_exp = L_FVx+np.dot(L_GVx,L_fhx_star)+min_e_Q*eta_norm      # more exact bound - exponentially stabilizing
    phi0_exp = L_FVx + np.dot(
        L_GVx, L_fhx_star)  # more exact bound - exponentially stabilizing

    phi1_decouple = np.dot(L_GVx, A_fl)

    # # Solve QP
    v_var = np.dot(A_fl, u_var) + L_fhx_star
    Quadratic_Positive_def = np.matmul(A_fl.T, A_fl)
    QP_det = np.linalg.det(Quadratic_Positive_def)
    c_QP = 2 * np.dot(L_fhx_star.T, A_fl)

    c_QP_extended = np.hstack([c_QP, -1])
    u_var_extended = np.hstack([u_var, c_var])

    # CLF_QP_cost_v = np.dot(v_var,v_var) // Exact quadratic cost
    CLF_QP_cost_v_effective = np.dot(
        u_var, np.dot(Quadratic_Positive_def, u_var)) + np.dot(
            c_QP, u_var) - c_var[0]  # Quadratic cost without constant term

    # CLF_QP_cost_u = np.dot(u_var,u_var)

    phi1 = np.dot(phi1_decouple, u_var) + c_var[0] * eta_norm

    #----Printing intermediate states

    # print("L_fhx_star: ",L_fhx_star)
    # print("c_QP:", c_QP)
    # print("Qp : ",Quadratic_Positive_def)
    # print("Qp det: ", QP_det)
    # print("c_QP", c_QP)

    # print("phi0_exp: ", phi0_exp)
    # print("PG:", PG)
    # print("L_GVx:", L_GVx)
    # print("eta6", eta6)
    # print("d : ", phi1_decouple)
    # print("Cost expression:", CLF_QP_cost_v)
    #print("Const expression:", phi0_exp+phi1)

    #----Different solver option // Gurobi did not work with python at this point (some binding issue 8/8/2018)
    solver = IpoptSolver()
    # solver = GurobiSolver()
    # print solver.available()
    # assert(solver.available()==True)
    # assertEqual(solver.solver_type(), mp.SolverType.kGurobi)
    # solver.Solve(prog)
    # assertEqual(result, mp.SolutionResult.kSolutionFound)

    # mp.AddLinearConstraint()
    # print("x:", x)
    # print("phi_0_exp:", phi0_exp)
    # print("phi1_decouple:", phi1_decouple)

    # print("eta1:", eta1)
    # print("eta2:", eta2)
    # print("eta3:", eta3)
    # print("eta4:", eta4)
    # print("eta5:", eta5)
    # print("eta6:", eta6)

    # Output Feedback Linearization controller
    mu = np.zeros(4)
    k = np.zeros((4, 14))
    k = np.matmul(Bout.T, Mout)
    k = np.matmul(np.linalg.inv(R), k)

    mu = -1 / 1 * np.matmul(k, eta)

    v = -U_temp + mu

    U_fl = np.matmul(
        A_fl_inv, v
    )  # Output feedback controller to compare the result with the CLF-QP solver

    # Set up the QP problem
    prog.AddQuadraticCost(CLF_QP_cost_v_effective)
    prog.AddConstraint(phi0_exp + phi1 <= 0)
    prog.AddConstraint(c_var[0] >= 0)
    prog.AddConstraint(c_var[0] <= 100)
    prog.AddConstraint(u_var[0] <= 30)
    prog.SetSolverOption(mp.SolverType.kIpopt, "print_level",
                         5)  # CAUTION: Assuming that solver used Ipopt

    print("CLF value:", Vx)  # Current CLF value

    prog.SetInitialGuess(u_var, U_fl)
    prog.Solve()  # Solve with default osqp

    # solver.Solve(prog)
    print("Optimal u : ", prog.GetSolution(u_var))
    U_CLF_QP = prog.GetSolution(u_var)
    C_CLF_QP = prog.GetSolution(c_var)

    #---- Printing for debugging
    # dx = robobee_plantBS.evaluate_f(U_fl,x)
    # print("dx", dx)
    # print("\n######################")
    # # print("qe3:", A_fl[0,0])
    # print("u:", u)
    # print("\n####################33")

    # deta4 = B_qw+Rqe3*U_fl_zero[0]+np.matmul(-np.matmul(Rqe3_hat,I_inv),U_fl_zero[1:4])*xi1
    # deta6 = B_yaw+np.dot(g_yaw,U_fl_zero[1:4])
    # print("deta4:",deta4)
    # print("deta6:",deta6)
    print(C_CLF_QP)

    phi1_opt = np.dot(phi1_decouple, U_CLF_QP) + C_CLF_QP * eta_norm
    phi1_opt_FL = np.dot(phi1_decouple, U_fl)

    print("FL u: ", U_fl)
    print("CLF u:", U_CLF_QP)
    print("Cost FL: ", np.dot(mu, mu))

    v_CLF = np.dot(A_fl, U_CLF_QP) + L_fhx_star
    print("Cost CLF: ", np.dot(v_CLF, v_CLF))
    print("Constraint FL : ", phi0_exp + phi1_opt_FL)
    print("Constraint CLF : ", phi0_exp + phi1_opt)
    u = U_CLF_QP

    print("eigenvalues minQ maxP:", [min_e_Q, max_e_P])

    return u
Example #15
Mout = solve_continuous_are(Aout, Bout, Q, R)
# print("Mout",Mout)
# print("M_out:", Mout)

# Minimum and maximum eigenvalues for Q and Mout used for CLF-QP constraint

eval_Q = np.linalg.eigvals(Q)
eval_P = np.linalg.eigvals(Mout)

min_e_Q = np.min(eval_Q)
max_e_P = np.max(eval_P)

print("Evalues: ", [min_e_Q, max_e_P])
# Set up for QP problem
prog = MathematicalProgram()
u_var = prog.NewContinuousVariables(4, "u_var")
c_var = prog.NewContinuousVariables(1, "c_var")
solverid = prog.GetSolverId()

tol = 1e-10
prog.SetSolverOption(mp.SolverType.kIpopt, "tol", tol)
prog.SetSolverOption(mp.SolverType.kIpopt, "constr_viol_tol", tol)
prog.SetSolverOption(mp.SolverType.kIpopt, "acceptable_tol", tol)
prog.SetSolverOption(mp.SolverType.kIpopt, "acceptable_constr_viol_tol", tol)

prog.SetSolverOption(mp.SolverType.kIpopt, "print_level",
                     5)  # CAUTION: Assuming that solver used Ipopt

# #-[2] Linearization and get (K,S) for LQR

# A, B =robobee_plantBS.GetLinearizedDynamics(uf, xf)
Example #16
# Lyapunov function
V = x.dot(P).dot(x)
V_dot = 2*x.dot(P).dot(f(x))

# degree of the polynomial lambda(x)
# no need to change it, but if you really want to,
# keep l_deg even and do not set l_deg greater than 10
l_deg = 4
assert l_deg % 2 == 0

# SOS Lagrange multipliers
l = prog2.NewSosPolynomial(Variables(x), l_deg)[0].ToExpression()

# level set as optimization variable
rho = prog2.NewContinuousVariables(1, 'rho')[0]

# write here the SOS condition described in the "Not quite there yet..." section above
prog2.AddSosConstraint(x.dot(x)*(V-rho) - l*V_dot)

# insert here the objective function (maximize rho)
prog2.AddLinearCost(-rho)

# solve program only if the lines above are filled
if len(prog2.GetAllConstraints()) != 0:

    # solve SOS program
    result = Solve(prog2)

    # get maximum rho
    assert result.is_success()
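The snippet is cut off right after the success check; extracting the optimized level set would presumably continue along these lines (rho_sol is an illustrative name):

    # recover the maximized level set from the SOS program
    rho_sol = result.GetSolution(rho)
    print('certified level set: rho =', rho_sol)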
Example #17
    def ComputeControlInput(self, x, t):
        # Set up things you might want...
        q = x[0:self.nq]
        v = x[self.nq:]

        kinsol = self.hand.doKinematics(x[0:self.hand.get_num_positions()])
        M = self.hand.massMatrix(kinsol)
        C = self.hand.dynamicsBiasTerm(kinsol, {}, None)
        B = self.hand.B

        # Assume grasp points are achieved, and calculate
        # contact jacobian at those points, as well as the current
        # grasp points and normals (in WORLD FRAME).
        grasp_points_world_now = self.transform_grasp_points_manipuland(q)
        grasp_normals_world_now = self.transform_grasp_normals_manipuland(q)
        J_manipuland = jacobian(self.transform_grasp_points_manipuland, q)
        ee_points_now = self.transform_grasp_points_fingers(q)
        J_manipulator = jacobian(self.transform_grasp_points_fingers, q)

        # The contact jacobian (Phi), for contact forces coming from
        # point contacts, describes how the movement of the contact point
        # maps into the joint coordinates of the robot. We can get this
        # by combining the jacobians derived from forward kinematics to each
        # of the two bodies that are in contact, evaluated at the contact point
        # in each body's frame.
        J_contact = J_manipuland - J_manipulator
        # Cut off the 3rd dimension, which should always be 0
        J_contact = J_contact[0:2, :, :]
        # Given these, the manipulator equations enter as a linear constraint
        # M qdd + C = Bu + J_contact lambda
        # (the combined effects of lambda on the robot).
        # The list of grasp points, in manipuland frame, that we'll use.
        n_cf = len(self.grasp_points)
        # The evaluated desired manipuland posture.
        manipuland_qdes = self.GetDesiredObjectPosition(t)

        # The desired robot (arm) posture, calculated via inverse kinematics
        # to achieve the desired grasp points on the object in its current
        # target posture.
        qdes, info = self.ComputeTargetPosture(x, manipuland_qdes)
        if info != 1:
            if not self.shut_up:
                print "Warning: target posture IK solve got info %d " \
                      "when computing goal posture at least once during " \
                      "simulation. This means the grasp points was hard to " \
                      "achieve given the current object posture. This is " \
                      "occasionally OK, but indicates that your controller " \
                      "is probably struggling a little." % info
                self.shut_up = True

        # From here, it's up to you. Following the guidelines in the problem
        # set, implement a controller to achieve the specified goals.

        mp = MathematicalProgram()

        #control variables = u = self.nu x 1
        #ddot{q} as variable w/ q's shape
        controls = mp.NewContinuousVariables(self.nu, "controls")
        qdd = mp.NewContinuousVariables(q.shape[0], "qdd")

        #make variables for lambda's and beta's
        k = 0
        b = mp.NewContinuousVariables(2, "betas_%d" % k)
        betas = b
        for i in range(len(self.grasp_points)):
            b = mp.NewContinuousVariables(
                2, "betas_%d" % k)  #pair of betas for each contact point
            betas = np.vstack((betas, b))
            mp.AddLinearConstraint(
                betas[i, 0] >= 0).evaluator().set_description(
                    "beta_0 greater than 0 for %d" % i)
            mp.AddLinearConstraint(
                betas[i, 1] >= 0).evaluator().set_description(
                    "beta_1 greater than 0 for %d" % i)
        #c_0=normals+mu*tangent
        #c1 = normals-mu*tangent
        n = grasp_normals_world_now.T[:, 0:2]  #row=contact point?
        t = get_perpendicular2d(n[0])
        c_i1 = n[0] + np.dot(self.mu, t)
        c_i2 = n[0] - np.dot(self.mu, t)
        l = c_i1.dot(betas[0, 0]) + c_i2.dot(betas[0, 1])
        lambdas = l

        for i in range(1, len(self.grasp_points)):
            n = grasp_normals_world_now.T[:, 0:
                                          2]  #row=contact point? transpose then index
            t = get_perpendicular2d(n[i])
            c_i1 = n[i] - np.dot(self.mu, t)
            c_i2 = n[i] + np.dot(self.mu, t)
            l = c_i1.dot(betas[i, 0]) + c_i2.dot(betas[i, 1])
            lambdas = np.vstack((lambdas, l))

        control_period = 0.0333
        # Constrain the dynamics
        dyn = M.dot(qdd) + C - B.dot(controls)
        for i in range(q.shape[0]):
            j_c = 0
            for l in range(len(self.grasp_points)):
                j_sub = J_contact[:, l, :].T
                j_c += j_sub.dot(lambdas[l])


            # print(j_c.shape)
            # print(dyn.shape)
            mp.AddConstraint(dyn[i] - j_c[i] == 0)

        #PD controller using kinematics
        Kp = 100.0
        Kd = 1.0
        control_period = 0.0333
        next_q = q + v.dot(control_period) + np.dot(qdd.dot(control_period**2),
                                                    .5)
        next_q_dot = v + qdd.dot(control_period)
        mp.AddQuadraticCost(Kp * (qdes - next_q).dot((qdes - next_q).T) + Kd *
                            (next_q_dot).dot(next_q_dot.T))
        Kp_ext = 100.
        mp.AddQuadraticCost(Kp_ext * (qdes - next_q)[-3:].dot(
            (qdes - next_q)[-3:].T))

        res = Solve(mp)
        c = res.GetSolution(controls)
        return c
Example #18
    def PlanGraspPoints(self):
        # First, extract the bounding geometry of the object.
        # Generally, our geometry is coming from 3d models, so
        # we have to do some legwork to extract 2D geometry. For
        # the examples we'll use in this set, we'll assume
        # that extracting the convex hull of the first visual element
        # is a good representation of the object geometry. (This is
        # not great! But it'll do the job for us, since we're going
        # to experiment with only simple objects.)
        kinsol = self.hand.doKinematics(
            self.x_nom[0:self.hand.get_num_positions()])
        self.manipuland_link_index = \
            self.hand.FindBody(self.manipuland_link_name).get_body_index()
        body = self.hand.get_body(self.manipuland_link_index)
        # For projecting into XY plane
        Tview = np.array([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 0.,
                                                               1.]])
        all_points = ExtractPlanarObjectGeometryConvexHull(body, Tview)

        # For later use: precompute the fingertip positions of the
        # robot in the nominal posture.
        nominal_fingertip_points = np.empty((2, self.num_fingers))
        for i in range(self.num_fingers):
            nominal_fingertip_points[:, i] = self.hand.transformPoints(
                kinsol, self.fingertip_position, self.finger_link_indices[i],
                0)[0:2, 0]

        # Search for an optimal grasp with N points
        random.seed(42)
        np.random.seed(42)

        def get_random_point_and_normal_on_surface(all_points):
            num_points = all_points.shape[1]
            i = random.choice(range(num_points - 1))
            first_point = np.asarray([all_points[0][i], all_points[1][i]])
            second_point = np.asarray(
                [all_points[0][i + 1], all_points[1][i + 1]])
            first_to_second = second_point - first_point
            # Clip to avoid interpolating close to a corner
            interpolate_param = np.clip(np.random.rand(), 0.2, 0.8)
            rand_point = first_point + interpolate_param * first_to_second

            normal = np.array([-first_to_second[1], first_to_second[0]]) \
                / np.linalg.norm(first_to_second)
            return rand_point, normal

        best_conv_volume = 0
        best_points = []
        best_normals = []
        best_finger_assignments = []
        for i in range(self.n_grasp_search_iters):
            grasp_points = []
            normals = []
            for j in range(self.num_fingers):
                point, normal = \
                    get_random_point_and_normal_on_surface(all_points)
                # check for duplicates or close points -- fingertip
                # radius is 0.2, so require twice that margin to avoid
                # intersecting fingertips.
                num_rejected = 0
                while min([1.0] + [
                        np.linalg.norm(grasp_point - point, 2)
                        for grasp_point in grasp_points
                ]) <= 0.4:
                    point, normal = \
                        get_random_point_and_normal_on_surface(all_points)
                    num_rejected += 1
                    if num_rejected > 10000:
                        print "Rejected 10000 points in a row due to crowding." \
                              " Your object is a bit too small for your number of" \
                              " fingers."
                        break
                grasp_points.append(point)
                normals.append(normal)
            if achieves_force_closure(grasp_points, normals, self.mu):
                # Test #1: Rank in terms of convex hull volume of grasp points
                volume = compute_convex_hull_volume(grasp_points)
                if volume < best_conv_volume:
                    continue

                # Test #2: Does IK work for this point?
                self.grasp_points = grasp_points
                self.grasp_normals = normals

                # Pick optimal finger assignment that
                # minimizes distance between fingertips in the
                # nominal posture, and the chosen grasp points
                grasp_points_world = self.transform_grasp_points_manipuland(
                    self.x_nom)[0:2, :]

                prog = MathematicalProgram()
                # We'd *really* like these to be binary variables,
                # but unfortunately don't have a MIP solver available in the
                # course docker container. Instead, we'll solve an LP,
                # and round the result to the nearest feasible integer
                # solutions. Intuitively, this LP should probably reach its
                # optimal value at an extreme point (where the variables
                # all take integer value). I've not yet observed this
                # not occurring in practice!
                assignment_vars = prog.NewContinuousVariables(
                    self.num_fingers, self.num_fingers, "assignment")
                for grasp_i in range(self.num_fingers):
                    # Every row and column of assignment vars sum to one --
                    # each finger matches to one grasp position
                    prog.AddLinearConstraint(
                        np.sum(assignment_vars[:, grasp_i]) == 1.)
                    prog.AddLinearConstraint(
                        np.sum(assignment_vars[grasp_i, :]) == 1.)
                    for finger_i in range(self.num_fingers):
                        # If this grasp assignment is active,
                        # add a cost equal to the distance between
                        # nominal pose and grasp position
                        prog.AddLinearCost(
                            assignment_vars[grasp_i, finger_i] *
                            np.linalg.norm(grasp_points_world[:, grasp_i] -
                                           nominal_fingertip_points[:,
                                                                    finger_i]))
                        prog.AddBoundingBoxConstraint(
                            0., 1., assignment_vars[grasp_i, finger_i])
                result = Solve(prog)
                assignments = result.GetSolution(assignment_vars)
                # Round assignments to nearest feasible set
                claimed = [False] * self.num_fingers
                for grasp_i in range(self.num_fingers):
                    order = np.argsort(assignments[grasp_i, :])
                    fill_i = self.num_fingers - 1
                    while claimed[order[fill_i]] is not False:
                        fill_i -= 1
                    if fill_i < 0:
                        raise Exception("Finger association failed. "
                                        "Horrible bug. Tell Greg")
                    assignments[grasp_i, :] *= 0.
                    assignments[grasp_i, order[fill_i]] = 1.
                    claimed[order[fill_i]] = True

                # Populate actual assignments
                self.grasp_finger_assignments = []
                for grasp_i in range(self.num_fingers):
                    for finger_i in range(self.num_fingers):
                        if assignments[grasp_i, finger_i] == 1.:
                            self.grasp_finger_assignments.append(
                                (finger_i, np.array(self.fingertip_position)))
                            continue

                qinit, info = self.ComputeTargetPosture(
                    self.x_nom,
                    self.x_nom[(self.nq - 3):self.nq],
                    backoff_distance=0.2)
                if info != 1:
                    continue

                best_conv_volume = volume
                best_points = grasp_points
                best_normals = normals
                best_finger_assignments = self.grasp_finger_assignments

        if len(best_points) == 0:
            print "After %d attempts, couldn't find a good grasp "\
                  "for this object." % self.n_grasp_search_iters
            print "Proceeding with a horrible random guess."
            best_points = [
                np.random.uniform(-1., 1., 2) for i in range(self.num_fingers)
            ]
            best_normals = [
                np.random.uniform(-1., 1., 2) for i in range(self.num_fingers)
            ]
            best_finger_assignments = [(i, self.fingertip_position)
                                       for i in range(self.num_fingers)]

        self.grasp_points = best_points
        self.grasp_normals = best_normals
        self.grasp_finger_assignments = best_finger_assignments
Example #19
def apply_angular_velocity_to_quaternion(q, w_axis, w_mag, t):
    delta_q = np.hstack(
        [np.cos(w_mag * t / 2.0), w_axis * np.sin(w_mag * t / 2.0)])
    return quat_multiply(q, delta_q)


def backward_euler(q_qprev_v_dt):
    q, qprev, w_axis, w_mag, dt = np.split(
        q_qprev_v_dt, [4, 4 + 4, 4 + 4 + 3, 4 + 4 + 3 + 1])
    return q - apply_angular_velocity_to_quaternion(qprev, w_axis, w_mag, dt)


N = 4
prog = MathematicalProgram()
q = prog.NewContinuousVariables(rows=N, cols=4, name='q')
w_axis = prog.NewContinuousVariables(rows=N, cols=3, name="w_axis")
w_mag = prog.NewContinuousVariables(rows=N, cols=1, name="w_mag")
# dt = [0.0] + [1.0/(N-1)] * (N-1)
dt = prog.NewContinuousVariables(rows=N, cols=1, name="dt")

for k in range(N):
    (prog.AddConstraint(lambda x: [x @ x], [1], [
        1
    ], q[k]).evaluator().set_description(f"q[{k}] unit quaternion constraint"))
    # Impose unit length constraint on the rotation axis.
    (prog.AddConstraint(lambda x: [x @ x], [1], [1],
                        w_axis[k]).evaluator().set_description(
                            f"w_axis[{k}] unit axis constraint"))
for k in range(1, N):
    # backward Euler rotation update between knots k-1 and k
    # (the original snippet is truncated here; this completion is a best guess)
    (prog.AddConstraint(backward_euler, [0.0] * 4, [0.0] * 4,
                        np.concatenate([q[k], q[k-1], w_axis[k], w_mag[k], dt[k]])
                        ).evaluator().set_description(f"q[{k}] backward euler constraint"))
Example #20
    def ComputeControlInput(self, x, t):
        # Set up things you might want...
        q = x[0:self.nq]
        v = x[self.nq:]

        kinsol = self.hand.doKinematics(x)
        M = self.hand.massMatrix(kinsol)
        C = self.hand.dynamicsBiasTerm(kinsol, {}, None)
        B = self.hand.B

        # Assume grasp points are achieved, and calculate
        # contact jacobian at those points, as well as the current
        # grasp points and normals (in WORLD FRAME).
        grasp_points_world_now = self.transform_grasp_points_manipuland(q)
        grasp_normals_world_now = self.transform_grasp_normals_manipuland(q)
        J_manipuland = jacobian(
            self.transform_grasp_points_manipuland, q)
        ee_points_now = self.transform_grasp_points_fingers(q)
        J_manipulator = jacobian(
            self.transform_grasp_points_fingers, q)

        # The contact jacobian (Phi), for contact forces coming from
        # point contacts, describes how the movement of the contact point
        # maps into the joint coordinates of the robot. We can get this
        # by combining the jacobians derived from forward kinematics to each
        # of the two bodies that are in contact, evaluated at the contact point
        # in each body's frame.
        J_contact = J_manipuland - J_manipulator
        # Cut off the 3rd dimension, which should always be 0
        J_contact = J_contact[0:2, :, :]
        
        normals = grasp_normals_world_now[0:2, :].T

        # Given these, the manipulator equations enter as a linear constraint
        # M qdd + C = Bu + J_contact lambda
        # (the combined effects of lambda on the robot).
        # The list of grasp points, in manipuland frame, that we'll use.
        n_cf = len(self.grasp_points)
        # The evaluated desired manipuland posture.
        manipuland_qdes = self.GetDesiredObjectPosition(t)

        # The desired robot (arm) posture, calculated via inverse kinematics
        # to achieve the desired grasp points on the object in its current
        # target posture.
        qdes, info = self.ComputeTargetPosture(x, manipuland_qdes)
        if info != 1:
            if not self.shut_up:
                print "Warning: target posture IK solve got info %d " \
                      "when computing goal posture at least once during " \
                      "simulation. This means the grasp points was hard to " \
                      "achieve given the current object posture. This is " \
                      "occasionally OK, but indicates that your controller " \
                      "is probably struggling a little." % info
                self.shut_up = True

        # From here, it's up to you. Following the guidelines in the problem
        # set, implement a controller to achieve the specified goals.

        '''
        YOUR CODE HERE
        '''
        # This is not the proper orthogonal vector... but it somehow makes it work,
        #    whereas the correct one does not.
        def ortho2(x):
            return np.array([x[0],-x[1]])
            #return np.array([x[1],-x[0]])

        prog = MathematicalProgram()
        
        dt = self.control_period
        
        u = prog.NewContinuousVariables(self.nu, "u")
        
        qdd = prog.NewContinuousVariables(q.shape[0], "qdd")
        for i in range(qdd.shape[0]):
            prog.AddLinearConstraint(qdd[i] <=  40)
            prog.AddLinearConstraint(qdd[i] >= -40)

            #prog.AddLinearConstraint(v[i] + qdd[i] * dt <=   80)
            #prog.AddLinearConstraint(v[i] - qdd[i] * dt >=  -80)
            
        beta = prog.NewContinuousVariables(n_cf, 2, "beta")
        #prog.AddQuadraticCost(0.1*np.sum(beta**2))
        for i in range(n_cf):
            prog.AddLinearConstraint(beta[i,0] >= 0)
            prog.AddLinearConstraint(beta[i,1] >= 0)
            prog.AddLinearConstraint(beta[i,0] <= 10)
            prog.AddLinearConstraint(beta[i,1] <= 10)
            
        c = np.zeros((n_cf,2,2))

        for i in range(n_cf):
            c[i,0] = normals[i] - self.mu * ortho2(normals[i])
            c[i,1] = normals[i] + self.mu * ortho2(normals[i])

        const = M.dot(qdd) + C - B.dot(u) ## eq 0

        for i in range(n_cf):
            lambda_i = c[i,0]*beta[i,0] + c[i,1]*beta[i,1]
            tmp = J_contact[:, i, :].T.dot(lambda_i)
            const -= tmp

        for i in range(const.shape[0]):
            prog.AddLinearConstraint(const[i] == 0.0)
        
        Kp = 1000
        Kd = 10
        
        #v2 = v + dt * qdd
        #q2 = q + v * dt + 0.5 * qdd * dt*dt
        
        v2 = v + dt * qdd
        q2 = q + (v+v2)/2 * dt #+ 0.5 * qdd * dt*dt

        #v2 = v + dt * qdd
        #q2 = q + v2 * dt
        prog.AddQuadraticCost(Kp * np.sum((qdes - q2) ** 2) + Kd * np.sum(v2**2))
        
        result = prog.Solve()
        u = prog.GetSolution(u)
        return u
Example #21
    def compute_trajectory(self,
                           obst_idx,
                           x_out,
                           y_out,
                           ux_out,
                           uy_out,
                           pose_initial=[0., 0., 0., 0.],
                           dt=0.05):
        '''
		Find trajectory with MILP
		input u are the velocities (xd, yd)
		dt 0.05 according to a rate of 20 Hz
		'''
        mp = MathematicalProgram()
        N = 30
        k = 0
        # define input trajectory and state traj
        u = mp.NewContinuousVariables(2, "u_%d" % k)  # xd yd
        input_trajectory = u
        st = mp.NewContinuousVariables(4, "state_%d" % k)
        # # binary variables for obstacles constraint
        c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
        obs = c
        states = st
        for k in range(1, N):
            u = mp.NewContinuousVariables(2, "u_%d" % k)
            input_trajectory = np.vstack((input_trajectory, u))
            st = mp.NewContinuousVariables(4, "state_%d" % k)
            states = np.vstack((states, st))
            c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
            obs = np.vstack((obs, c))
        st = mp.NewContinuousVariables(4, "state_%d" % (N + 1))
        states = np.vstack((states, st))
        c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
        obs = np.vstack((obs, c))
        ### define cost
        mp.AddLinearCost(100 *
                         (-states[-1, 0]))  # go as far forward as possible
        # mp.AddQuadraticCost(states[-1,1]*states[-1,1])
        # time constraint
        M = 1000  # big-M constant for the obstacle constraints
        # state constraint
        for i in range(2):  # initial state constraint x y
            mp.AddLinearConstraint(states[0, i] <= pose_initial[i])
            mp.AddLinearConstraint(states[0, i] >= pose_initial[i])
        for i in range(2):  # initial velocity constraint xd yd
            mp.AddLinearConstraint(states[0, 2 + i] <= pose_initial[2 + i] + 1)
            mp.AddLinearConstraint(states[0, 2 + i] >= pose_initial[2 + i] - 1)
        for i in range(N):
            # state update according to dynamics
            state_next = self.quad_dynamics(states[i, :],
                                            input_trajectory[i, :], dt)
            for j in range(4):
                mp.AddLinearConstraint(states[i + 1, j] <= state_next[j])
                mp.AddLinearConstraint(states[i + 1, j] >= state_next[j])
            # obstacle constraint
            for j in range(self.ang_discret - 1):
                mp.AddLinearConstraint(sum(obs[i, 4 * j:4 * j + 4]) <= 3)
                ang_min = self.angles[j]  # lower angle bound of obstacle
                ang_max = self.angles[j + 1]  # higher angle bound of obstacle
                if int(obst_idx[j]) < (self.rng_discret -
                                       1):  # less than max range measured
                    rng_min = self.ranges[int(
                        obst_idx[j])]  # where the obst is at at this angle
                    rng_max = self.ranges[int(obst_idx[j] + 1)]
                    mp.AddLinearConstraint(
                        states[i, 0] <= rng_min - 0.05 +
                        M * obs[i, 4 * j])  # xi <= xobs,low + M*c
                    mp.AddLinearConstraint(
                        states[i, 0] >= rng_max + 0.005 -
                        M * obs[i, 4 * j + 1])  # xi >= xobs,high - M*c
                    mp.AddLinearConstraint(
                        states[i, 1] <= states[i, 0] * np.tan(ang_min) - 0.05 +
                        M * obs[i, 4 * j + 2])  # yi <= xi*tan(ang,min) + M*c
                    mp.AddLinearConstraint(
                        states[i, 1] >= states[i, 0] * np.tan(ang_max) + 0.05 -
                        M * obs[i, 4 * j + 3])  # yi >= xi*tan(ang,max) - M*c
            # environment constraint, don't leave the fov
            mp.AddLinearConstraint(
                states[i, 1] >= states[i, 0] * np.tan(-self.theta))
            mp.AddLinearConstraint(
                states[i, 1] <= states[i, 0] * np.tan(self.theta))
            # bound the inputs
            # mp.AddConstraint(input_trajectory[i,:].dot(input_trajectory[i,:]) <= 2.5*2.5) # doesn't work with mixed-integer
            mp.AddLinearConstraint(input_trajectory[i, 0] <= 2.5)
            mp.AddLinearConstraint(input_trajectory[i, 0] >= -2.5)
            mp.AddLinearConstraint(input_trajectory[i, 1] <= 0.5)
            mp.AddLinearConstraint(input_trajectory[i, 1] >= -0.5)

        mp.Solve()

        input_trajectory = mp.GetSolution(input_trajectory)
        trajectory = mp.GetSolution(states)
        x_out[:] = trajectory[:, 0]
        y_out[:] = trajectory[:, 1]
        ux_out[:] = input_trajectory[:, 0]
        uy_out[:] = input_trajectory[:, 1]
        return trajectory, input_trajectory
Example #22
    def static_controller(self, qref, verbose=False):
        """ 
        Generates a controller to maintain a static pose
        
        Arguments:
            qref: (N,) numpy array, the static pose to be maintained

        Return Values:
            u: (M,) numpy array, actuations to best achieve static pose
            f: (C,) numpy array, associated normal reaction forces

        static_controller generates the actuations and reaction forces assuming the velocities and accelerations are zero. Thus, the equation to solve is:
            N(q) = B*u + J*f
        where N is a vector of gravitational and conservative generalized forces, B is the actuation selection matrix, and J is the contact-Jacobian transpose.

        Currently, static_controller only considers the effects of the normal forces. Frictional forces are not yet supported
        """
        #TODO: Solve for friction forces as well

        # Check inputs
        if qref.ndim > 1 or qref.shape[0] != self.multibody.num_positions():
            raise ValueError(
                f"Reference position mut be ({self.multibody.num_positions(),},) array"
            )
        # Set the context
        context = self.multibody.CreateDefaultContext()
        self.multibody.SetPositions(context, qref)
        # Get the necessary properties
        G = self.multibody.CalcGravityGeneralizedForces(context)
        Jn, _ = self.GetContactJacobians(context)
        phi = self.GetNormalDistances(context)
        B = self.multibody.MakeActuationMatrix()
        #Collect terms
        A = np.concatenate([B, Jn.transpose()], axis=1)
        # Create the mathematical program
        prog = MathematicalProgram()
        l_var = prog.NewContinuousVariables(self.num_contacts(), name="forces")
        u_var = prog.NewContinuousVariables(self.multibody.num_actuators(),
                                            name="controls")
        # Ensure dynamics approximately satisfied
        prog.AddL2NormCost(A=A,
                           b=-G,
                           vars=np.concatenate([u_var, l_var], axis=0))
        # Enforce normal complementarity
        prog.AddBoundingBoxConstraint(np.zeros(l_var.shape),
                                      np.full(l_var.shape, np.inf), l_var)
        prog.AddConstraint(phi.dot(l_var) == 0)
        # Solve
        result = Solve(prog)
        # Check for a solution
        if result.is_success():
            u = result.GetSolution(u_var)
            f = result.GetSolution(l_var)
            if verbose:
                printProgramReport(result, prog)
            return (u, f)
        else:
            print(f"Optimization failed. Returning zeros")
            if verbose:
                printProgramReport(result, prog)
            return (np.zeros(u_var.shape), np.zeros(l_var.shape))
Example #23
    def MILP_compute_traj(self,
                          obst_idx,
                          x_out,
                          y_out,
                          dx,
                          dy,
                          pose_initial=[0., 0.]):
        '''
		Find trajectory with MILP
		Outputs trajectory (waypoints) and new K for control 
		'''
        mp = MathematicalProgram()
        N = 8
        k = 0
        # define state traj
        st = mp.NewContinuousVariables(2, "state_%d" % k)
        # # binary variables for obstacles constraint
        c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
        obs = c
        states = st
        for k in range(1, N):
            st = mp.NewContinuousVariables(2, "state_%d" % k)
            states = np.vstack((states, st))
            c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
            obs = np.vstack((obs, c))
        st = mp.NewContinuousVariables(2, "state_%d" % (N + 1))
        states = np.vstack((states, st))
        c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
        obs = np.vstack((obs, c))
        # variables encoding max x y dist from obstacle
        x_margin = mp.NewContinuousVariables(1, "x_margin")
        y_margin = mp.NewContinuousVariables(1, "y_margin")
        ### define cost
        for i in range(N):
            mp.AddLinearCost(-states[i, 0])  # go as far forward as possible
        mp.AddLinearCost(-states[-1, 0])
        mp.AddLinearCost(-x_margin[0])
        mp.AddLinearCost(-y_margin[0])
        # bound x y margin so it doesn't explode
        mp.AddLinearConstraint(x_margin[0] <= 3.)
        mp.AddLinearConstraint(y_margin[0] <= 3.)
        # x/y margins are non-negative and at least the robot radius
        mp.AddLinearConstraint(x_margin[0] >= 0.05)
        mp.AddLinearConstraint(y_margin[0] >= 0.05)
        M = 1000  # big-M constant for the binary obstacle constraints
        # state constraint
        for i in range(2):  # initial state constraint x y
            mp.AddLinearConstraint(states[0, i] <= pose_initial[i])
            mp.AddLinearConstraint(states[0, i] >= pose_initial[i])
        for i in range(N):
            mp.AddQuadraticCost((states[i + 1, 1] - states[i, 1])**2)
            mp.AddLinearConstraint(states[i + 1, 0] <= states[i, 0] + dx)
            mp.AddLinearConstraint(states[i + 1, 0] >= states[i, 0] - dx)
            mp.AddLinearConstraint(states[i + 1, 1] <= states[i, 1] + dy)
            mp.AddLinearConstraint(states[i + 1, 1] >= states[i, 1] - dy)
            # obstacle constraint
            for j in range(self.ang_discret - 1):
                mp.AddLinearConstraint(sum(obs[i, 4 * j:4 * j + 4]) <= 3)
                ang_min = self.angles[j]  # lower angle bound of obstacle
                ang_max = self.angles[j + 1]  # upper angle bound of obstacle
                if int(obst_idx[j]) < (self.rng_discret -
                                       1):  # less than max range measured
                    rng_min = self.ranges[int(
                        obst_idx[j])]  # where the obst is at at this angle
                    rng_max = self.ranges[int(obst_idx[j] + 1)]
                    mp.AddLinearConstraint(
                        states[i, 0] <= rng_min - x_margin[0] +
                        M * obs[i, 4 * j])  # xi <= xobs,low + M*c
                    mp.AddLinearConstraint(
                        states[i, 0] >= rng_max + x_margin[0] -
                        M * obs[i, 4 * j + 1])  # xi >= xobs,high - M*c
                    mp.AddLinearConstraint(
                        states[i, 1] <=
                        states[i, 0] * np.tan(ang_min) - y_margin[0] +
                        M * obs[i, 4 * j + 2])  # yi <= xi*tan(ang,min) + M*c
                    mp.AddLinearConstraint(
                        states[i, 1] >=
                        states[i, 0] * np.tan(ang_max) + y_margin[0] -
                        M * obs[i, 4 * j + 3])  # yi >= xi*tan(ang,max) - M*c
        # obstacle constraint for last state
        for j in range(self.ang_discret - 1):
            mp.AddLinearConstraint(sum(obs[N, 4 * j:4 * j + 4]) <= 3)
            ang_min = self.angles[j]  # lower angle bound of obstacle
            ang_max = self.angles[j + 1]  # upper angle bound of obstacle
            if int(obst_idx[j]) < (self.rng_discret -
                                   1):  # less than max range measured
                rng_min = self.ranges[int(
                    obst_idx[j])]  # where the obst is at at this angle
                rng_max = self.ranges[int(obst_idx[j] + 1)]
                mp.AddLinearConstraint(
                    states[N, 0] <= rng_min - x_margin[0] +
                    M * obs[N, 4 * j])  # xi <= xobs,low + M*c
                mp.AddLinearConstraint(
                    states[N, 0] >= rng_max + x_margin[0] -
                    M * obs[N, 4 * j + 1])  # xi >= xobs,high - M*c
                mp.AddLinearConstraint(
                    states[N,
                           1] <= states[N, 0] * np.tan(ang_min) - y_margin[0] +
                    M * obs[N, 4 * j + 2])  # yi <= xi*tan(ang,min) + M*c
                mp.AddLinearConstraint(
                    states[N,
                           1] >= states[N, 0] * np.tan(ang_max) + y_margin[0] -
                    M * obs[N, 4 * j + 3])  # yi >= xi*tan(ang,max) - M*c

        mp.Solve()

        trajectory = mp.GetSolution(states)
        xm = mp.GetSolution(x_margin)
        ym = mp.GetSolution(y_margin)
        x_out[:] = trajectory[:, 0]
        y_out[:] = trajectory[:, 1]
        return trajectory, xm[0], ym[0]
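The obstacle constraints above use the standard big-M disjunction: each of the four inequalities in a sector is relaxed whenever its binary obs[i, 4*j + m] equals 1, and the cut sum(obs[i, 4*j:4*j+4]) <= 3 forces at least one binary to 0, so at least one separating half-plane stays active at every timestep and angular sector. A hypothetical call, assuming planner is an instance of the surrounding class with ang_discret, rng_discret, angles and ranges already configured:

import numpy as np

# Max-range reading in every angular sector, i.e. no obstacle detected.
obst_idx = np.full(planner.ang_discret, planner.rng_discret - 1)
# Output buffers: the method writes the N + 1 = 9 waypoints into them in place.
x_out = np.zeros(9)
y_out = np.zeros(9)
traj, x_margin, y_margin = planner.MILP_compute_traj(
    obst_idx, x_out, y_out, dx=0.5, dy=0.5, pose_initial=[0., 0.])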
Exemple #24
0
def trajopt_simulation(x0, xf, u_max=0.5):
	# numeric parameters
	time_interval = .1
	time_steps = 400

	# initialize optimization
	prog = MathematicalProgram()

	# optimization variables
	state = prog.NewContinuousVariables(time_steps + 1, 3, 'state')
	u = prog.NewContinuousVariables(time_steps, 1, 'u')

	# final position constraint
	# for residual in constraint_state_to_orbit(state[-2], state[-1], xf, 1/u_max, time_interval):
	# 	prog.AddConstraint(residual == 0)

	# initial position constraint
	prog.AddConstraint(eq(state[0],x0))

	# discretized dynamics
	for t in range(time_steps):
		residuals = dubins_discrete_dynamics(state[t], state[t+1], u[t], time_interval)
		for residual in residuals:
			prog.AddConstraint(residual == 0)

		# control limits
		prog.AddConstraint(u[t][0] <= u_max)
		prog.AddConstraint(-u_max <= u[t][0])

		# cost - increases cost if off the orbit
		p = state[t][:2] - xf[:2]
		v = (state[t+1][:2] - state[t][:2]) / time_interval
		# prog.AddCost(time_interval)
		# prog.AddCost(u[t][0]*u[t][0]*time_interval)
		# prog.AddCost(p.dot(v)*time_interval)
		for residual in constraint_state_to_orbit(state[t], state[t+1], xf, 1/u_max, time_interval):
			# prog.AddConstraint(residual >= 0)
			prog.AddQuadraticCost(residual)
		# prog.AddCost((p.dot(p)))

	# initial guess
	state_guess = interpolate_dubins_state(
		np.array([0, 0, np.pi]),
		xf,
		time_steps, 
		time_interval,
		1/u_max
		)

	prog.SetInitialGuess(state, state_guess)

	solver = SnoptSolver()
	result = solver.Solve(prog)

	assert result.is_success()

	# retrieve optimal solution
	u_opt = result.GetSolution(u).T
	state_opt = result.GetSolution(state).T

	return state_opt, u_opt
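A hypothetical call of trajopt_simulation, assuming the Dubins helpers it references (dubins_discrete_dynamics, constraint_state_to_orbit, interpolate_dubins_state) are defined in the same module:

import numpy as np

# Start at the origin facing backwards; orbit the point (5, 0).
x0 = np.array([0., 0., np.pi])   # (x, y, heading)
xf = np.array([5., 0., 0.])
state_opt, u_opt = trajopt_simulation(x0, xf, u_max=0.5)
print(state_opt.shape, u_opt.shape)   # (3, 401) and (1, 400) after the transpose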
    def intercepting_with_obs_avoidance_bb(self,
                                           p0,
                                           v0,
                                           pf,
                                           vf,
                                           T,
                                           obstacles=None,
                                           p_puck=None):
        """kick while avoiding obstacles using big M formulation and branch and bound"""
        x0 = np.array(np.concatenate((p0, v0), axis=0))
        xf = np.concatenate((pf, vf), axis=0)
        prog = MathematicalProgram()

        # state and control inputs
        N = int(T / self.params.dt)  # number of command steps
        state = prog.NewContinuousVariables(N + 1, 4, 'state')
        cmd = prog.NewContinuousVariables(N, 2, 'input')

        # Initial and final state
        prog.AddLinearConstraint(eq(state[0], x0))
        prog.AddLinearConstraint(eq(state[-1], xf))

        self.add_dynamics(prog, N, state, cmd)

        ## Input saturation
        self.add_input_limits(prog, cmd, N)

        # Arena constraints
        self.add_arena_limits(prog, state, N)

        # Add Mixed-integer constraints - will solve with BB

        # avoid hitting the puck while generating a kicking trajectory
        # MILP formulation with B&B solver
        x_obs_puck = prog.NewBinaryVariables(rows=N + 1,
                                             cols=2)  # obs x_min, obs x_max
        y_obs_puck = prog.NewBinaryVariables(rows=N + 1,
                                             cols=2)  # obs y_min, obs y_max
        self.avoid_puck_bigm(prog, x_obs_puck, y_obs_puck, state, N, p_puck)

        # Avoid other players
        if obstacles != None:
            x_obs_player = list()
            y_obs_player = list()
            for i, obs in enumerate(obstacles):
                x_obs_player.append(prog.NewBinaryVariables(
                    rows=N + 1, cols=2))  # obs x_min, obs x_max
                y_obs_player.append(prog.NewBinaryVariables(
                    rows=N + 1, cols=2))  # obs y_min, obs y_max
                self.avoid_other_player_bigm(prog, x_obs_player[i],
                                             y_obs_player[i], state, obs, N)

        # Solve with simple b&b solver
        for k in range(N):
            prog.AddQuadraticCost(cmd[k].flatten().dot(cmd[k]))

        bb = branch_and_bound.MixedIntegerBranchAndBound(
            prog,
            OsqpSolver().solver_id())
        result = bb.Solve()
        if result != result.kSolutionFound:
            raise ValueError('Infeasible optimization problem.')
        u_values = np.array([bb.GetSolution(u) for u in cmd]).T
        solution_found = result.kSolutionFound
        return solution_found, u_values
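The same big-M plus branch-and-bound machinery in isolation, as a minimal self-contained sketch; the import path of the branch-and-bound helper differs across Drake versions, so the imports below (mirroring the usage above) are an assumption:

import numpy as np
from pydrake.all import MathematicalProgram, OsqpSolver
from pydrake.solvers import branch_and_bound   # module location varies with Drake version

prog = MathematicalProgram()
x = prog.NewContinuousVariables(1, "x")[0]
b = prog.NewBinaryVariables(2, "b")   # b[0]: stay left of 1, b[1]: stay right of 2
M = 100.0                             # big-M constant

# Keep x out of the interval (1, 2): either x <= 1 or x >= 2 must hold.
prog.AddLinearConstraint(x <= 1.0 + M * (1 - b[0]))
prog.AddLinearConstraint(x >= 2.0 - M * (1 - b[1]))
prog.AddLinearConstraint(b[0] + b[1] >= 1)

# Pull x toward the middle of the forbidden interval; the binaries force it
# to one of the two boundaries.
prog.AddQuadraticCost((x - 1.5) ** 2)

bb = branch_and_bound.MixedIntegerBranchAndBound(prog, OsqpSolver().solver_id())
result = bb.Solve()
if result == result.kSolutionFound:
    print("x =", bb.GetSolution(x))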
Exemple #26
0
    def __init__(self,
                 rtree,
                 q_nom,
                 control_period=0.001,
                 print_period=1.0,
                 sim=True,
                 cost_pub=False):

        LeafSystem.__init__(self)

        self.rtree = rtree
        self.nq = rtree.get_num_positions()
        self.nv = rtree.get_num_velocities()
        self.nu = rtree.get_num_actuators()
        self.cost_pub = cost_pub

        dim = 3  # 3D
        nd = 4  # for friction cone approx, hard coded for now
        self.nc = 4  # number of contacts; TODO don't hard code

        self.nf = self.nc * nd  # number of contact force variables
        self.ncf = 2  # number of loop constraint forces; TODO don't hard code
        self.neps = self.nc * dim  # number of slack variables for contact

        if sim:
            self.sim = True
            self._DeclareInputPort(PortDataType.kVectorValued,
                                   self.nq + self.nv)
            self._DeclareDiscreteState(self.nu)
            self._DeclareVectorOutputPort(BasicVector(self.nu),
                                          self.CopyStateOutSim)

            self.print_period = print_period
            self.last_print_time = -print_period

            self.last_v = np.zeros(3)
            self.lcmgl = lcmgl("Balancing-plan", true_lcm.LCM())
            # self.lcmgl = None
        else:
            self.sim = False
            self._DeclareInputPort(PortDataType.kVectorValued, 1)
            self._DeclareInputPort(PortDataType.kVectorValued,
                                   kCassiePositions)
            self._DeclareInputPort(PortDataType.kVectorValued,
                                   kCassieVelocities)
            self._DeclareInputPort(PortDataType.kVectorValued, 2)
            self._DeclareDiscreteState(kCassieActuators * 8 + 1)
            self._DeclareVectorOutputPort(
                BasicVector(1),
                lambda c, o: self.CopyStateOut(c, o, 8 * kCassieActuators, 1))
            self._DeclareVectorOutputPort(
                BasicVector(kCassieActuators), lambda c, o: self.CopyStateOut(
                    c, o, 0, kCassieActuators))  #torque
            self._DeclareVectorOutputPort(
                BasicVector(kCassieActuators), lambda c, o: self.CopyStateOut(
                    c, o, kCassieActuators, kCassieActuators))  #motor_pos
            self._DeclareVectorOutputPort(
                BasicVector(kCassieActuators), lambda c, o: self.CopyStateOut(
                    c, o, 2 * kCassieActuators, kCassieActuators))  #motor_vel
            self._DeclareVectorOutputPort(
                BasicVector(kCassieActuators), lambda c, o: self.CopyStateOut(
                    c, o, 3 * kCassieActuators, kCassieActuators))  #kp
            self._DeclareVectorOutputPort(
                BasicVector(kCassieActuators), lambda c, o: self.CopyStateOut(
                    c, o, 4 * kCassieActuators, kCassieActuators))  #kd
            self._DeclareVectorOutputPort(
                BasicVector(kCassieActuators), lambda c, o: self.CopyStateOut(
                    c, o, 5 * kCassieActuators, kCassieActuators))  #ki
            self._DeclareVectorOutputPort(
                BasicVector(kCassieActuators), lambda c, o: self.CopyStateOut(
                    c, o, 6 * kCassieActuators, kCassieActuators))  #leak
            self._DeclareVectorOutputPort(
                BasicVector(kCassieActuators), lambda c, o: self.CopyStateOut(
                    c, o, 7 * kCassieActuators, kCassieActuators))  #clamp
        self._DeclarePeriodicDiscreteUpdate(0.0005)

        self.q_nom = q_nom
        self.com_des = rtree.centerOfMass(self.rtree.doKinematics(q_nom))
        self.u_des = CassieFixedPointTorque()

        self.lfoot = rtree.FindBody("toe_left")
        self.rfoot = rtree.FindBody("toe_right")
        self.springs = 2 + np.array([
            rtree.FindIndexOfChildBodyOfJoint("knee_spring_left_fixed"),
            rtree.FindIndexOfChildBodyOfJoint("knee_spring_right_fixed"),
            rtree.FindIndexOfChildBodyOfJoint("ankle_spring_joint_left"),
            rtree.FindIndexOfChildBodyOfJoint("ankle_spring_joint_right")
        ])

        umin = np.zeros(self.nu)
        umax = np.zeros(self.nu)
        ii = 0
        for motor in rtree.actuators:
            umin[ii] = motor.effort_limit_min
            umax[ii] = motor.effort_limit_max
            ii += 1

        slack_limit = 10.0
        self.initialized = False

        #------------------------------------------------------------
        # Add Decision Variables ------------------------------------
        prog = MathematicalProgram()
        qddot = prog.NewContinuousVariables(self.nq, "joint acceleration")
        u = prog.NewContinuousVariables(self.nu, "input")
        bar = prog.NewContinuousVariables(self.ncf, "four bar forces")
        beta = prog.NewContinuousVariables(self.nf, "friction forces")
        eps = prog.NewContinuousVariables(self.neps, "slack variable")
        nparams = prog.num_vars()

        #------------------------------------------------------------
        # Problem Constraints ---------------------------------------
        self.con_u_lim = prog.AddBoundingBoxConstraint(umin, umax,
                                                       u).evaluator()
        self.con_fric_lim = prog.AddBoundingBoxConstraint(0, 100000,
                                                          beta).evaluator()
        self.con_slack_lim = prog.AddBoundingBoxConstraint(
            -slack_limit, slack_limit, eps).evaluator()

        bar_con = np.zeros((self.ncf, self.nq))
        self.con_4bar = prog.AddLinearEqualityConstraint(
            bar_con, np.zeros(self.ncf), qddot).evaluator()

        if self.nc > 0:
            dyn_con = np.zeros(
                (self.nq, self.nq + self.nu + self.ncf + self.nf))
            dyn_vars = np.concatenate((qddot, u, bar, beta))
            self.con_dyn = prog.AddLinearEqualityConstraint(
                dyn_con, np.zeros(self.nq), dyn_vars).evaluator()

            foot_con = np.zeros((self.neps, self.nq + self.neps))
            foot_vars = np.concatenate((qddot, eps))
            self.con_foot = prog.AddLinearEqualityConstraint(
                foot_con, np.zeros(self.neps), foot_vars).evaluator()

        else:
            dyn_con = np.zeros((self.nq, self.nq + self.nu + self.ncf))
            dyn_vars = np.concatenate((qddot, u, bar))
            self.con_dyn = prog.AddLinearEqualityConstraint(
                dyn_con, np.zeros(self.nq), dyn_vars).evaluator()

        #------------------------------------------------------------
        # Problem Costs ---------------------------------------------
        self.Kp_com = 50
        self.Kd_com = 2.0 * sqrt(self.Kp_com)
        # self.Kp_qdd = 10*np.eye(self.nq)#np.diag(np.diag(H)/np.max(np.diag(H)))
        self.Kp_qdd = np.diag(
            np.concatenate(([10, 10, 10, 5, 5, 5], np.zeros(self.nq - 6))))
        self.Kd_qdd = 1.0 * np.sqrt(self.Kp_qdd)
        self.Kp_qdd[self.springs, self.springs] = 0
        self.Kd_qdd[self.springs, self.springs] = 0

        com_ddot_des = np.zeros(dim)
        qddot_des = np.zeros(self.nq)

        self.w_com = 0
        self.w_qdd = np.zeros(self.nq)
        self.w_qdd[:6] = 10
        self.w_qdd[8:10] = 1
        self.w_qdd[3:6] = 1000
        # self.w_qdd[self.springs] = 0
        self.w_u = 0.001 * np.ones(self.nu)
        self.w_u[2:4] = 0.01
        self.w_slack = 0.1

        self.cost_qdd = prog.AddQuadraticErrorCost(np.diag(self.w_qdd),
                                                   qddot_des,
                                                   qddot).evaluator()
        self.cost_u = prog.AddQuadraticErrorCost(np.diag(self.w_u), self.u_des,
                                                 u).evaluator()
        self.cost_slack = prog.AddQuadraticErrorCost(
            self.w_slack * np.eye(self.neps), np.zeros(self.neps),
            eps).evaluator()

        # self.cost_com = prog.AddQuadraticCost().evaluator()
        # self.cost_qdd = prog.AddQuadraticCost(
        #     2*np.diag(self.w_qdd), -2*np.matmul(qddot_des, np.diag(self.w_qdd)), qddot).evaluator()
        # self.cost_u = prog.AddQuadraticCost(
        #     2*np.diag(self.w_u), -2*np.matmul(self.u_des, np.diag(self.w_u)) , u).evaluator()
        # self.cost_slack = prog.AddQuadraticCost(
        #     2*self.w_slack*np.eye(self.neps), np.zeros(self.neps), eps).evaluator()

        REG = 1e-8
        self.cost_reg = prog.AddQuadraticErrorCost(
            REG * np.eye(nparams), np.zeros(nparams),
            prog.decision_variables()).evaluator()
        # self.cost_reg = prog.AddQuadraticCost(2*REG*np.eye(nparams),
        #     np.zeros(nparams), prog.decision_variables()).evaluator()

        #------------------------------------------------------------
        # Solver settings -------------------------------------------
        self.solver = GurobiSolver()
        # self.solver = OsqpSolver()
        prog.SetSolverOption(SolverType.kGurobi, "Method", 2)

        #------------------------------------------------------------
        # Save MathProg Variables -----------------------------------
        self.qddot = qddot
        self.u = u
        self.bar = bar
        self.beta = beta
        self.eps = eps
        self.prog = prog
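The evaluators saved above are presumably refreshed with the current dynamics terms and the program re-solved at every control tick. A minimal sketch of that pattern (a fragment meant to live inside the discrete-update callback), using zero placeholders so only the shapes and the UpdateCoefficients calls are meaningful; the real values would come from the mass matrix, bias term and the loop/contact Jacobians at the current state, and the Solve return type varies with the Drake version:

# Hypothetical per-tick update inside the discrete-update callback.
H = np.eye(self.nq)                      # placeholder mass matrix
C = np.zeros(self.nq)                    # placeholder bias term
B = np.zeros((self.nq, self.nu))         # placeholder actuation matrix
Jbar = np.zeros((self.ncf, self.nq))     # placeholder four-bar Jacobian
Jbeta = np.zeros((self.nq, self.nf))     # placeholder friction-basis Jacobian (transposed)

# Dynamics: H*qddot - B*u - Jbar'*bar - Jbeta*beta = -C
self.con_dyn.UpdateCoefficients(np.hstack([H, -B, -Jbar.T, -Jbeta]), -C)

# Four-bar loop constraint on the joint accelerations.
self.con_4bar.UpdateCoefficients(Jbar, np.zeros(self.ncf))

# Track a PD-shaped desired acceleration; same 2*Q / -2*Q*qddot_des convention
# as the commented-out AddQuadraticCost call above.
qddot_des = np.zeros(self.nq)
self.cost_qdd.UpdateCoefficients(2 * np.diag(self.w_qdd),
                                 -2 * np.diag(self.w_qdd).dot(qddot_des))

# Re-solve: newer Drake returns a MathematicalProgramResult, older versions
# return a status code and the values are read back with prog.GetSolution.
result = self.solver.Solve(self.prog)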
Exemple #27
0
    def ComputeControlInput(self, x, t):
        # Set up things you might want...
        q = x[0:self.nq]
        v = x[self.nq:]

        kinsol = self.hand.doKinematics(x[0:self.hand.get_num_positions()])
        M = self.hand.massMatrix(kinsol)
        C = self.hand.dynamicsBiasTerm(kinsol, {}, None)
        B = self.hand.B

        # Assume grasp points are achieved, and calculate
        # contact jacobian at those points, as well as the current
        # grasp points and normals (in WORLD FRAME).
        grasp_points_world_now = self.transform_grasp_points_manipuland(q)
        grasp_normals_world_now = self.transform_grasp_normals_manipuland(q)
        J_manipuland = jacobian(
            self.transform_grasp_points_manipuland, q)
        ee_points_now = self.transform_grasp_points_fingers(q)
        J_manipulator = jacobian(
            self.transform_grasp_points_fingers, q)

        # The contact jacobian (Phi), for contact forces coming from
        # point contacts, describes how the movement of the contact point
        # maps into the joint coordinates of the robot. We can get this
        # by combining the jacobians derived from forward kinematics to each
        # of the two bodies that are in contact, evaluated at the contact point
        # in each body's frame.
        J_contact = J_manipuland - J_manipulator
        # Cut off the 3rd dimension, which should always be 0
        J_contact = J_contact[0:2, :, :]
        # Given these, the manipulator equations enter as a linear constraint
        # M qdd + C = Bu + J_contact lambda
        # (the combined effects of lambda on the robot).
        # The list of grasp points, in manipuland frame, that we'll use.
        n_cf = len(self.grasp_points)
        number_of_grasp_points = len(self.grasp_points)
        # The evaluated desired manipuland posture.
        manipuland_qdes = self.GetDesiredObjectPosition(t)

        # The desired robot (arm) posture, calculated via inverse kinematics
        # to achieve the desired grasp points on the object in its current
        # target posture.
        qdes, info = self.ComputeTargetPosture(x, manipuland_qdes)
        # print('shape' + str(np.shape(qdes)))
        # print('self.nq' + str(self.nq))
        # print('self.nu' + str(self.nu))
        if info != 1:
            if not self.shut_up:
                print "Warning: target posture IK solve got info %d " \
                      "when computing goal posture at least once during " \
                      "simulation. This means the grasp points was hard to " \
                      "achieve given the current object posture. This is " \
                      "occasionally OK, but indicates that your controller " \
                      "is probably struggling a little." % info
                self.shut_up = True

        # From here, it's up to you. Following the guidelines in the problem
        # set, implement a controller to achieve the specified goals.

        kd = 1
        kp = 25

        from pydrake.all import MathematicalProgram, Solve
        mp = MathematicalProgram()

        u = mp.NewContinuousVariables(self.nu, "u")
        qdd = mp.NewContinuousVariables(self.nq, "qdd")
        x_y_dimensions = 2

        lambda_variable = mp.NewContinuousVariables(x_y_dimensions, number_of_grasp_points, "lambda_variable")

        forces = np.zeros((self.nq))
        for i in range(number_of_grasp_points):
            current_forces = np.transpose(J_contact)[:,i,:].dot(lambda_variable[:,i])
            forces = forces + current_forces

        leftHandSide = M.dot(qdd) + C
        rightHandSide = B.dot(u) + forces

        for i in range(len(leftHandSide)):
            mp.AddConstraint(leftHandSide[i] == rightHandSide[i])

        # Calculate Normals
        normals = np.array([])
        for i in range(number_of_grasp_points):
            current_grasp_normal = grasp_normals_world_now[0:2, i]
            normals = np.vstack((normals, current_grasp_normal)) if normals.size else current_grasp_normal

        # Calculate tangents
        tangents = np.array([])
        for i in range(number_of_grasp_points):
            current_grasp_tangent = np.array([normals[i, 1], -normals[i, 0]])
            tangents = np.vstack((tangents, current_grasp_tangent)) if tangents.size else current_grasp_tangent

        # Create beta variables
        beta = mp.NewContinuousVariables(2, number_of_grasp_points, "b0")

        # Create Grasps
        for i in range(number_of_grasp_points):
            c0 = normals[i] - self.mu * tangents[i]
            c1 = normals[i] + self.mu * tangents[i]
            right_hand_lambda1 = c0 * beta[0, i] + c1 * beta[1, i]
            mp.AddConstraint(lambda_variable[0, i] == right_hand_lambda1[0])
            mp.AddConstraint(lambda_variable[1, i] == right_hand_lambda1[1])
            mp.AddConstraint(beta[0, i] >= 0)
            mp.AddConstraint(beta[1, i] >= 0)
            mp.AddConstraint(normals[i].dot(lambda_variable[:, i]) >= 0.25)

        # Copying the control period of the constructor. Probably not supposed to do this...
        next_tick_qd = v + qdd * self.control_period
        next_tick_q = q + next_tick_qd * self.control_period

        q_error = qdes - next_tick_q
        proportionalCost = q_error.dot(np.transpose(q_error))
        qd_error = 0 - next_tick_qd
        diffCost = qd_error.dot(np.transpose(qd_error))
        mp.AddQuadraticCost(kp * proportionalCost + kd * diffCost)
        result = Solve(mp)
        u_solution = result.GetSolution(u)

        u = np.zeros(self.nu)
        return u_solution
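The c0/c1 construction above parameterizes the planar friction cone by its two edge vectors, and the nonnegative beta variables keep the contact force inside the cone. A standalone numeric sanity check with made-up numbers:

import numpy as np

mu = 0.5
n = np.array([0.0, 1.0])            # contact normal
t = np.array([n[1], -n[0]])         # tangent, built the same way as in the code above
c0, c1 = n - mu * t, n + mu * t     # friction-cone edges
beta = np.array([0.3, 0.7])         # nonnegative edge multipliers
f = beta[0] * c0 + beta[1] * c1
# Any nonnegative combination of the edges satisfies |f.t| <= mu * f.n.
assert abs(f.dot(t)) <= mu * f.dot(n) + 1e-12
print("force inside the cone:", f)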
Exemple #28
0
class Optimization:
    def __init__(self, config):
        self.physical_params = config["physical_params"]
        self.T = config["T"]
        self.dt = config["dt"]
        self.initial_state = config["xinit"]
        self.goal_state = config["xgoal"]
        self.ellipse_arc = config["ellipse_arc"]
        self.deviation_cost = config["deviation_cost"]
        self.Qf = config["Qf"]
        self.limits = config["limits"]
        self.n_state = 6
        self.n_nominal_forces = 4
        self.tire_model = config["tire_model"]
        self.initial_guess_config = config["initial_guess_config"]
        self.puddle_model = config["puddle_model"]
        self.force_constraint = config["force_constraint"]
        self.visualize_initial_guess = config["visualize_initial_guess"]
        self.dynamics = Dynamics(self.physical_params.lf,
                                 self.physical_params.lr,
                                 self.physical_params.m,
                                 self.physical_params.Iz, self.dt)

    def build_program(self):
        self.prog = MathematicalProgram()

        # Declare variables.
        state = self.prog.NewContinuousVariables(rows=self.T + 1,
                                                 cols=self.n_state,
                                                 name='state')
        nominal_forces = self.prog.NewContinuousVariables(
            rows=self.T, cols=self.n_nominal_forces, name='nominal_forces')
        steers = self.prog.NewContinuousVariables(rows=self.T, name="steers")
        slip_ratios = self.prog.NewContinuousVariables(rows=self.T,
                                                       cols=2,
                                                       name="slip_ratios")
        self.state = state
        self.nominal_forces = nominal_forces
        self.steers = steers
        self.slip_ratios = slip_ratios

        # Set the initial state.
        xinit = pack_state_vector(self.initial_state)
        for i, s in enumerate(xinit):
            self.prog.AddConstraint(state[0, i] == s)

        # Constrain xdot to always be at least some value to prevent numerical issues with optimizer.
        for i in range(self.T + 1):
            s = unpack_state_vector(state[i])
            self.prog.AddConstraint(s["xdot"] >= self.limits.min_xdot)

            # Constrain slip ratio to be at least a certain magnitude.
            if i != self.T:
                self.prog.AddConstraint(
                    slip_ratios[i,
                                0]**2.0 >= self.limits.min_slip_ratio_mag**2.0)
                self.prog.AddConstraint(
                    slip_ratios[i,
                                1]**2.0 >= self.limits.min_slip_ratio_mag**2.0)

        # Control rate limits.
        for i in range(self.T - 1):
            ddelta = self.dt * (steers[i + 1] - steers[i])
            f_dkappa = self.dt * (slip_ratios[i + 1, 0] - slip_ratios[i, 0])
            r_dkappa = self.dt * (slip_ratios[i + 1, 1] - slip_ratios[i, 1])
            self.prog.AddConstraint(ddelta <= self.limits.max_ddelta)
            self.prog.AddConstraint(ddelta >= -self.limits.max_ddelta)
            self.prog.AddConstraint(f_dkappa <= self.limits.max_dkappa)
            self.prog.AddConstraint(f_dkappa >= -self.limits.max_dkappa)
            self.prog.AddConstraint(r_dkappa <= self.limits.max_dkappa)
            self.prog.AddConstraint(r_dkappa >= -self.limits.max_dkappa)

        # Control value limits.
        for i in range(self.T):
            self.prog.AddConstraint(steers[i] <= self.limits.max_delta)
            self.prog.AddConstraint(steers[i] >= -self.limits.max_delta)

        # Add dynamics constraints by constraining residuals to be zero.
        for i in range(self.T):
            residuals = self.dynamics.nominal_dynamics(state[i], state[i + 1],
                                                       nominal_forces[i],
                                                       steers[i])
            for r in residuals:
                self.prog.AddConstraint(r == 0.0)

        # Add the cost function.
        self.add_cost(state)

        # Add a different force constraint depending on the configuration.
        if self.force_constraint == ForceConstraint.NO_PUDDLE:
            self.add_no_puddle_force_constraints(
                state, nominal_forces, steers,
                self.tire_model.get_deterministic_model(), slip_ratios)
        elif self.force_constraint == ForceConstraint.MEAN_CONSTRAINED:
            self.add_mean_constrained()
        elif self.force_constraint == ForceConstraint.CHANCE_CONSTRAINED:
            self.add_chance_constraints()
        else:
            raise NotImplementedError("ForceConstraint type invalid.")
        return

    def constant_input_initial_guess(self, state, steers, slip_ratios,
                                     nominal_forces):
        # Guess the slip ratio as the minimum allowed value.
        gslip_ratios = np.tile(
            np.array([
                self.limits.min_slip_ratio_mag, self.limits.min_slip_ratio_mag
            ]), (self.T, 1))

        # Guess the slip angle as a linearly ramping steer that then becomes constant.
        # This is done by creating an array of values corresponding to end_delta then
        # filling in the initial ramp up phase.
        gsteers = self.initial_guess_config["end_delta"] * np.ones(self.T)
        igc = self.initial_guess_config
        for i in range(igc["ramp_steps"]):
            gsteers[i] = (i / igc["ramp_steps"]) * (igc["end_delta"] -
                                                    igc["start_delta"])

        # Create empty arrays for state and forces.
        gstate = np.empty((self.T + 1, self.n_state))
        gstate[0] = pack_state_vector(self.initial_state)
        gforces = np.empty((self.T, 4))
        all_slips = self.T * [None]

        # Simulate the dynamics for the initial guess differently depending on the force
        # constraint being used.
        if self.force_constraint == ForceConstraint.NO_PUDDLE:
            tire_model = self.tire_model.get_deterministic_model()
            for i in range(self.T):
                s = unpack_state_vector(gstate[i])

                # Simulate the dynamics for one time step.
                gstate[i + 1], forces, slips = self.dynamics.simulate(
                    gstate[i], gsteers[i], gslip_ratios[i, 0],
                    gslip_ratios[i, 1], tire_model, self.dt)

                # Store the results.
                gforces[i] = pack_force_vector(forces)
                all_slips[i] = slips
        elif self.force_constraint == ForceConstraint.MEAN_CONSTRAINED or self.force_constraint == ForceConstraint.CHANCE_CONSTRAINED:
            # mean model is a function that maps (slip_ratio, slip_angle, x, y) -> (E[Fx], E[Fy])
            mean_model = self.tire_model.get_mean_model(
                self.puddle_model.get_mean_fun())

            for i in range(self.T):
                # Update the tire model based off the conditions of the world
                # at (x, y)
                s = unpack_state_vector(gstate[i])
                tire_model = lambda slip_ratio, slip_angle: mean_model(
                    slip_ratio, slip_angle, s["X"], s["Y"])

                # Simulate the dynamics for one time step.
                gstate[i + 1], forces, slips = self.dynamics.simulate(
                    gstate[i], gsteers[i], gslip_ratios[i, 0],
                    gslip_ratios[i, 1], tire_model, self.dt)

                # Store the results.
                gforces[i] = pack_force_vector(forces)
                all_slips[i] = slips
        else:
            raise NotImplementedError(
                "Invalid value for self.force_constraint")

        # Declare array for the initial guess and set the values.
        initial_guess = np.empty(self.prog.num_vars())
        self.prog.SetDecisionVariableValueInVector(state, gstate,
                                                   initial_guess)
        self.prog.SetDecisionVariableValueInVector(steers, gsteers,
                                                   initial_guess)
        self.prog.SetDecisionVariableValueInVector(slip_ratios, gslip_ratios,
                                                   initial_guess)
        self.prog.SetDecisionVariableValueInVector(nominal_forces, gforces,
                                                   initial_guess)

        if self.visualize_initial_guess:
            # TODO: reorganize visualizations
            psis = gstate[:, 2]
            xs = gstate[:, 4]
            ys = gstate[:, 5]
            fig, ax = plt.subplots()
            if self.force_constraint != ForceConstraint.NO_PUDDLE:
                plot_puddles(ax, self.puddle_model)
            plot_ellipse_arc(ax, self.ellipse_arc)
            plot_planned_trajectory(ax, xs, ys, psis, gsteers,
                                    self.physical_params)
            # Plot the slip ratios/angles
            plot_slips(all_slips)
            plot_forces(gforces)
            if self.force_constraint == ForceConstraint.NO_PUDDLE:
                generate_animation(xs,
                                   ys,
                                   psis,
                                   gsteers,
                                   self.physical_params,
                                   self.dt,
                                   puddle_model=None)
            else:
                generate_animation(xs,
                                   ys,
                                   psis,
                                   gsteers,
                                   self.physical_params,
                                   self.dt,
                                   puddle_model=self.puddle_model)
        return initial_guess

    def solve_program(self):
        # Generate initial guess
        initial_guess = self.constant_input_initial_guess(
            self.state, self.steers, self.slip_ratios, self.nominal_forces)

        # Solve the problem.
        solver = SnoptSolver()
        result = solver.Solve(self.prog, initial_guess)
        solver_details = result.get_solver_details()
        print("Exit flag: " + str(solver_details.info))

        self.visualize_results(result)

    def visualize_results(self, result):
        state_res = result.GetSolution(self.state)
        psis = state_res[:, 2]
        xs = state_res[:, 4]
        ys = state_res[:, 5]
        steers = result.GetSolution(self.steers)

        fig, ax = plt.subplots()
        plot_ellipse_arc(ax, self.ellipse_arc)
        plot_puddles(ax, self.puddle_model)
        plot_planned_trajectory(ax, xs, ys, psis, steers, self.physical_params)
        generate_animation(xs,
                           ys,
                           psis,
                           steers,
                           self.physical_params,
                           self.dt,
                           puddle_model=self.puddle_model)
        plt.show()

    def add_cost(self, state):
        # Add the final state cost function.
        diff_state = pack_state_vector(self.goal_state) - state[-1]
        self.prog.AddQuadraticCost(diff_state.T @ self.Qf @ diff_state)

        # Get the approx distance function for the ellipse.
        fun = self.ellipse_arc.approx_dist_fun()
        for i in range(self.T):
            s = unpack_state_vector(state[i])
            self.prog.AddCost(self.deviation_cost * fun(s["X"], s["Y"]))

    def add_mean_constrained(self):
        # mean model is a function that maps (slip_ratio, slip_angle, x, y) -> (E[Fx], E[Fy])
        mean_model = self.tire_model.get_mean_model(
            self.puddle_model.get_mean_fun())

        for i in range(self.T):
            # Get slip angles and slip ratios.
            s = unpack_state_vector(self.state[i])
            F = unpack_force_vector(self.nominal_forces[i])
            # get the tire model at this position in space.
            tire_model = lambda slip_ratio, slip_angle: mean_model(
                slip_ratio, slip_angle, s["X"], s["Y"])

            # Unpack values
            delta = self.steers[i]
            alpha_f, alpha_r = self.dynamics.slip_angles(
                s["xdot"], s["ydot"], s["psidot"], delta)
            kappa_f = self.slip_ratios[i, 0]
            kappa_r = self.slip_ratios[i, 1]

            # Compute expected forces.
            E_Ffx, E_Ffy = tire_model(kappa_f, alpha_f)
            E_Frx, E_Fry = tire_model(kappa_r, alpha_r)

            # Constrain these expected force values to be equal to the nominal
            # forces in the optimization.
            self.prog.AddConstraint(E_Ffx - F["f_long"] == 0.0)
            self.prog.AddConstraint(E_Ffy - F["f_lat"] == 0.0)
            self.prog.AddConstraint(E_Frx - F["r_long"] == 0.0)
            self.prog.AddConstraint(E_Fry - F["r_lat"] == 0.0)

    def add_chance_constraints(self):
        pass

    def add_no_puddle_force_constraints(self, state, forces, steers,
                                        pacejka_model, slip_ratios):
        """
        Args:
            prog:
            state:
            forces:
            steers:
            pacejka_model: function with signature (slip_ratio, slip_angle) using pydrake.math
        """
        for i in range(self.T):
            # Get slip angles and slip ratios.
            s = unpack_state_vector(state[i])
            F = unpack_force_vector(forces[i])
            delta = steers[i]
            alpha_f, alpha_r = self.dynamics.slip_angles(
                s["xdot"], s["ydot"], s["psidot"], delta)
            kappa_f = slip_ratios[i, 0]
            kappa_r = slip_ratios[i, 1]
            Ffx, Ffy = pacejka_model(kappa_f, alpha_f)
            Frx, Fry = pacejka_model(kappa_r, alpha_r)

            # Constrain the values from the pacejka model to be equal
            # to the desired values in the optimization.
            self.prog.AddConstraint(Ffx - F["f_long"] == 0.0)
            self.prog.AddConstraint(Ffy - F["f_lat"] == 0.0)
            self.prog.AddConstraint(Frx - F["r_long"] == 0.0)
            self.prog.AddConstraint(Fry - F["r_lat"] == 0.0)
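The pacejka_model argument is any callable of the form (slip_ratio, slip_angle) -> (Fx, Fy) built from pydrake.math functions so that it also evaluates on symbolic decision variables. A simplified magic-formula sketch with made-up stiffness/shape/peak coefficients:

from pydrake.math import atan, sin

def simple_pacejka(slip_ratio, slip_angle, B=10.0, C=1.9, D=1.0):
    # Simplified magic formula; B, C, D are illustrative, not fitted values.
    Fx = D * sin(C * atan(B * slip_ratio))
    Fy = -D * sin(C * atan(B * slip_angle))
    return Fx, Fy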
    def compute_trajectory_to_other_world(self, state_initial, minimum_time,
                                          maximum_time):
        '''
        Your mission is to implement this function.

        A successful implementation of this function will compute a dynamically feasible trajectory
        which satisfies these criteria:
            - Efficiently conserve fuel
            - Reach "orbit" of the far right world
            - Approximately obey the dynamic constraints
            - Begin at the state_initial provided
            - Take no more than maximum_time, no less than minimum_time

        The above are defined more precisely in the provided notebook.

        Please note there are three return args.

        :param state_initial: numpy array of length 4, see rocket_dynamics for documentation
        :param minimum_time: float, minimum time allowed for trajectory
        :param maximum_time: float, maximum time allowed for trajectory

        :return: three return args separated by commas:

            trajectory, input_trajectory, time_array

            trajectory: a 2d array with N rows, and 4 columns. See simulate_states_over_time for more documentation.
            input_trajectory: a 2d array with N-1 rows, and 2 columns. See simulate_states_over_time for more documentation.
            time_array: an array with N rows. 

        '''

        # length of horizon (excluding init state)
        N = 50
        trajectory = np.zeros((N + 1, 4))
        input_trajectory = np.ones((N, 2)) * 10.0

        ### My implementation of Direct Transcription
        # (states and control are all decision vars using Euler integration)
        mp = MathematicalProgram()

        # let trajectory duration be a decision var
        total_time = mp.NewContinuousVariables(1, "total_time")
        dt = total_time[0] / N

        # create the control decision var (m*N) and state decision var (n*[N+1])
        idx = 0
        u_list = mp.NewContinuousVariables(2, "u_{}".format(idx))
        state_list = mp.NewContinuousVariables(4, "state_{}".format(idx))
        state_list = np.vstack(
            (state_list,
             mp.NewContinuousVariables(4, "state_{}".format(idx + 1))))
        for idx in range(1, N):
            u_list = np.vstack(
                (u_list, mp.NewContinuousVariables(2, "u_{}".format(idx))))
            state_list = np.vstack(
                (state_list,
                 mp.NewContinuousVariables(4, "state_{}".format(idx + 1))))

        ### Constraints
        # set init state as constraint on stage 0 decision vars
        for state_idx in range(4):
            mp.AddLinearConstraint(
                state_list[0, state_idx] == state_initial[state_idx])

        # interstage equality constraint on state vars via Euler integration
        # note: Direct Collocation could improve accuracy for same computation
        for idx in range(1, N + 1):
            state_new = state_list[idx - 1, :] + dt * self.rocket_dynamics(
                state_list[idx - 1, :], u_list[idx - 1, :])
            for state_idx in range(4):
                mp.AddConstraint(state_list[idx,
                                            state_idx] == state_new[state_idx])

        # constraint on time
        mp.AddLinearConstraint(total_time[0] <= maximum_time)
        mp.AddLinearConstraint(total_time[0] >= minimum_time)

        # constraint on final state distance (squared)to second planet
        final_dist_to_world_2_sq = (
            self.world_2_position[0] - state_list[-1, 0])**2 + (
                self.world_2_position[1] - state_list[-1, 1])**2
        mp.AddConstraint(final_dist_to_world_2_sq <= 0.25)

        # constraint on final state speed (squared)
        final_speed_sq = state_list[-1, 2]**2 + state_list[-1, 3]**2
        mp.AddConstraint(final_speed_sq <= 1)

        ### Cost
        # equal cost on vertical/horizontal accels, weight shouldn't matter since this is the only cost
        mp.AddQuadraticCost(1 * u_list[:, 0].dot(u_list[:, 0]))
        mp.AddQuadraticCost(1 * u_list[:, 1].dot(u_list[:, 1]))

        ### Solve and parse
        result = Solve(mp)
        trajectory = result.GetSolution(state_list)
        input_trajectory = result.GetSolution(u_list)
        tf = result.GetSolution(total_time)
        time_array = np.linspace(0, tf[0], N + 1)

        print "optimization successful: ", result.is_success()
        print "total num decision vars (x: (N+1)*4, u: 2N, total_time: 1): {}".format(
            mp.num_vars())
        print "solver used: ", result.get_solver_id().name()
        print "optimal trajectory time: {:.2f} sec".format(tf[0])

        return trajectory, input_trajectory, time_array
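A hypothetical call, assuming rocket is an instance of the surrounding class (which also provides rocket_dynamics and world_2_position):

import numpy as np

state_initial = np.array([0., 0., 0., 0.])   # [x, y, xdot, ydot], made-up start
traj, u_traj, t_arr = rocket.compute_trajectory_to_other_world(
    state_initial, minimum_time=5., maximum_time=40.)
print(traj.shape, u_traj.shape, t_arr.shape)  # (51, 4), (50, 2), (51,)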
Exemple #30
0
    def __init__(self, rtree, q_nom, control_period=0.001):

        self.rtree = rtree
        self.nq = rtree.get_num_positions()
        self.nv = rtree.get_num_velocities()
        self.nu = rtree.get_num_actuators()

        dim = 3  # 3D
        nd = 4  # for friction cone approx, hard coded for now
        self.nc = 4  # number of contacts; TODO don't hard code

        self.nf = self.nc * nd  # number of contact force variables
        self.ncf = 2  # number of loop constraint forces; TODO don't hard code
        self.neps = self.nc * dim  # number of slack variables for contact

        self.q_nom = q_nom
        self.com_des = rtree.centerOfMass(self.rtree.doKinematics(q_nom))
        self.u_des = CassieFixedPointTorque()

        self.lfoot = rtree.FindBody("toe_left")
        self.rfoot = rtree.FindBody("toe_right")
        self.springs = 2 + np.array([
            rtree.FindIndexOfChildBodyOfJoint("knee_spring_left_fixed"),
            rtree.FindIndexOfChildBodyOfJoint("knee_spring_right_fixed"),
            rtree.FindIndexOfChildBodyOfJoint("ankle_spring_joint_left"),
            rtree.FindIndexOfChildBodyOfJoint("ankle_spring_joint_right")
        ])

        umin = np.zeros(self.nu)
        umax = np.zeros(self.nu)
        ii = 0
        for motor in rtree.actuators:
            umin[ii] = motor.effort_limit_min
            umax[ii] = motor.effort_limit_max
            ii += 1

        slack_limit = 10.0
        self.initialized = False

        #------------------------------------------------------------
        # Add Decision Variables ------------------------------------
        prog = MathematicalProgram()
        qddot = prog.NewContinuousVariables(self.nq, "joint acceleration")
        u = prog.NewContinuousVariables(self.nu, "input")
        bar = prog.NewContinuousVariables(self.ncf, "four bar forces")
        beta = prog.NewContinuousVariables(self.nf, "friction forces")
        eps = prog.NewContinuousVariables(self.neps, "slack variable")
        nparams = prog.num_vars()

        #------------------------------------------------------------
        # Problem Constraints ---------------------------------------
        self.con_u_lim = prog.AddBoundingBoxConstraint(umin, umax,
                                                       u).evaluator()
        self.con_fric_lim = prog.AddBoundingBoxConstraint(0, 100000,
                                                          beta).evaluator()
        self.con_slack_lim = prog.AddBoundingBoxConstraint(
            -slack_limit, slack_limit, eps).evaluator()

        bar_con = np.zeros((self.ncf, self.nq))
        self.con_4bar = prog.AddLinearEqualityConstraint(
            bar_con, np.zeros(self.ncf), qddot).evaluator()

        if self.nc > 0:
            dyn_con = np.zeros(
                (self.nq, self.nq + self.nu + self.ncf + self.nf))
            dyn_vars = np.concatenate((qddot, u, bar, beta))
            self.con_dyn = prog.AddLinearEqualityConstraint(
                dyn_con, np.zeros(self.nq), dyn_vars).evaluator()

            foot_con = np.zeros((self.neps, self.nq + self.neps))
            foot_vars = np.concatenate((qddot, eps))
            self.con_foot = prog.AddLinearEqualityConstraint(
                foot_con, np.zeros(self.neps), foot_vars).evaluator()

        else:
            dyn_con = np.zeros((self.nq, self.nq + self.nu + self.ncf))
            dyn_vars = np.concatenate((qddot, u, bar))
            self.con_dyn = prog.AddLinearEqualityConstraint(
                dyn_con, np.zeros(self.nq), dyn_vars).evaluator()

        #------------------------------------------------------------
        # Problem Costs ---------------------------------------------
        self.Kp_com = 50
        self.Kd_com = 2.0 * sqrt(self.Kp_com)
        # self.Kp_qdd = 10*np.eye(self.nq)#np.diag(np.diag(H)/np.max(np.diag(H)))
        self.Kp_qdd = np.diag(
            np.concatenate(([10, 10, 10, 5, 5, 5], np.zeros(self.nq - 6))))
        self.Kd_qdd = 1.0 * np.sqrt(self.Kp_qdd)
        self.Kp_qdd[self.springs, self.springs] = 0
        self.Kd_qdd[self.springs, self.springs] = 0

        com_ddot_des = np.zeros(dim)
        qddot_des = np.zeros(self.nq)

        self.w_com = 0
        self.w_qdd = np.zeros(self.nq)
        self.w_qdd[:6] = 10
        self.w_qdd[8:10] = 1
        self.w_qdd[3:6] = 1000
        # self.w_qdd[self.springs] = 0
        self.w_u = 0.001 * np.ones(self.nu)
        self.w_u[2:4] = 0.01
        self.w_slack = 0.1

        self.cost_qdd = prog.AddQuadraticErrorCost(np.diag(self.w_qdd),
                                                   qddot_des,
                                                   qddot).evaluator()
        self.cost_u = prog.AddQuadraticErrorCost(np.diag(self.w_u), self.u_des,
                                                 u).evaluator()
        self.cost_slack = prog.AddQuadraticErrorCost(
            self.w_slack * np.eye(self.neps), np.zeros(self.neps),
            eps).evaluator()

        # self.cost_com = prog.AddQuadraticCost().evaluator()
        # self.cost_qdd = prog.AddQuadraticCost(
        #     2*np.diag(self.w_qdd), -2*np.matmul(qddot_des, np.diag(self.w_qdd)), qddot).evaluator()
        # self.cost_u = prog.AddQuadraticCost(
        #     2*np.diag(self.w_u), -2*np.matmul(self.u_des, np.diag(self.w_u)) , u).evaluator()
        # self.cost_slack = prog.AddQuadraticCost(
        #     2*self.w_slack*np.eye(self.neps), np.zeros(self.neps), eps).evaluator()

        REG = 1e-8
        self.cost_reg = prog.AddQuadraticErrorCost(
            REG * np.eye(nparams), np.zeros(nparams),
            prog.decision_variables()).evaluator()
        # self.cost_reg = prog.AddQuadraticCost(2*REG*np.eye(nparams),
        #     np.zeros(nparams), prog.decision_variables()).evaluator()

        #------------------------------------------------------------
        # Solver settings -------------------------------------------
        self.solver = GurobiSolver()
        # self.solver = OsqpSolver()
        prog.SetSolverOption(SolverType.kGurobi, "Method", 2)

        #------------------------------------------------------------
        # Save MathProg Variables -----------------------------------
        self.qddot = qddot
        self.u = u
        self.bar = bar
        self.beta = beta
        self.eps = eps
        self.prog = prog