Example #1
    def SOS_compute_1(self, S, rho_prev):
        # fix V and rho, search for L and u
        prog = MathematicalProgram()
        x = prog.NewIndeterminates(2, "x")

        # Define u
        K = prog.NewContinuousVariables(2, "K")

        # Fixed Lyapunov
        V = x.dot(np.dot(S, x))
        Vdot = Jacobian([V], x).dot(self.dynamics_K(x, K))[0]

        # Define the Lagrange multipliers.
        (lambda_, constraint) = prog.NewSosPolynomial(Variables(x), 2)
        prog.AddLinearConstraint(K[0] * x[0] <= 2.5)
        prog.AddSosConstraint(-Vdot - lambda_.ToExpression() * (rho_prev - V))

        result = prog.Solve()
        # print(lambda_.ToExpression())
        # print(lambda_.decision_variables())
        lc = [prog.GetSolution(var) for var in lambda_.decision_variables()]
        lbda_coeff = np.ones([3, 3])
        lbda_coeff[0, 0] = lc[0]
        lbda_coeff[0, 1] = lbda_coeff[1, 0] = lc[1]
        lbda_coeff[2, 0] = lbda_coeff[0, 2] = lc[2]
        lbda_coeff[1, 1] = lc[3]
        lbda_coeff[2, 1] = lbda_coeff[1, 2] = lc[4]
        lbda_coeff[2, 2] = lc[5]
        return lbda_coeff
Example #2
    def SOS_compute_3(self, K, l_coeff, rho_max=10.):
        prog = MathematicalProgram()
        # fix u and lbda, search for V and rho
        x = prog.NewIndeterminates(2, "x")

        # get lbda from before
        l = np.array([x[1], x[0], 1])
        lbda = l.dot(np.dot(l_coeff, l))

        # rho is decision variable now
        rho = prog.NewContinuousVariables(1, "rho")[0]

        # create lyap V
        s = prog.NewContinuousVariables(4, "s")
        S = np.array([[s[0], s[1]], [s[2], s[3]]])
        V = x.dot(np.dot(S, x))
        Vdot = Jacobian([V], x).dot(self.dynamics_K(x, K))[0]

        prog.AddSosConstraint(V)
        prog.AddSosConstraint(-Vdot - lbda * (rho - V))

        prog.AddLinearCost(-rho)
        prog.AddLinearConstraint(rho <= rho_max)

        prog.Solve()
        rho = prog.GetSolution(rho)
        s = prog.GetSolution(s)
        return s, rho
Example #3
    def SOS_compute_2(self, l_coeff, S, rho_max=10.):
        prog = MathematicalProgram()
        # fix V and lbda, search for u and rho
        x = prog.NewIndeterminates(2, "x")
        # get lbda from before
        l = np.array([x[1], x[0], 1])
        lbda = l.dot(np.dot(l_coeff, l))

        # Define u
        K = prog.NewContinuousVariables(2, "K")

        # Fixed Lyapunov
        V = x.dot(np.dot(S, x))
        Vdot = Jacobian([V], x).dot(self.dynamics_K(x, K))[0]

        # rho is decision variable now
        rho = prog.NewContinuousVariables(1, "rho")[0]

        prog.AddSosConstraint(-Vdot - lbda * (rho - V))

        prog.AddLinearConstraint(rho <= rho_max)
        prog.AddLinearCost(-rho)
        prog.Solve()
        rho = prog.GetSolution(rho)
        K = prog.GetSolution(K)
        return rho, K
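Examples #1-#3 are the three steps of a bilinear alternation for estimating a region of attraction: each step fixes two of (V, multiplier, gains K) and optimizes the rest. Below is a minimal sketch of an outer loop that could tie them together; the `verifier` object holding these methods, the initial `S`/`rho`, and the convergence tolerance are assumptions, not part of the original code.

import numpy as np

def alternate_roa(verifier, S0, rho0, n_iters=20, tol=1e-3):
    # Sketch only: alternate the three SOS programs until rho stops improving.
    S, rho, K = S0, rho0, None
    for _ in range(n_iters):
        # fix V (via S) and rho, search for the Lagrange multiplier
        l_coeff = verifier.SOS_compute_1(S, rho)
        # fix V and the multiplier, search for the gains K and rho
        rho, K = verifier.SOS_compute_2(l_coeff, S)
        # fix K and the multiplier, search for a new V (via S) and rho
        s, rho_new = verifier.SOS_compute_3(K, l_coeff)
        S = np.array([[s[0], s[1]], [s[2], s[3]]])
        if abs(rho_new - rho) < tol:
            break
        rho = rho_new
    return S, K, rho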
Example #4
def run_simple_mathematical_program():
    print "\n\nsimple mathematical program"
    mp = MathematicalProgram()
    x = mp.NewContinuousVariables(1, "x")
    mp.AddLinearCost(x[0] * 1.0)
    mp.AddLinearConstraint(x[0] >= 1)
    print(mp.Solve())
    print(mp.GetSolution(x))
    print("(finished) simple mathematical program")
Example #5
def run_complex_mathematical_program():
    print "\n\ncomplex mathematical program"
    mp = MathematicalProgram()
    x = mp.NewContinuousVariables(1, 'x')
    mp.AddCost(cost, x)
    mp.AddConstraint(quad_constraint, [1.0], [2.0], x)
    mp.SetInitialGuess(x, [1.1])
    print(mp.Solve())
    res = mp.GetSolution(x)
    print(res)

    print("(finished) complex mathematical program")
Example #6
def SOS_traj_optim(S, rho_guess):
    # S provides the initial V guess
    # STEP 1: search for L and u with fixed V and p
    mp1 = MathematicalProgram()
    x = mp1.NewIndeterminates(3, "x")
    V = x.dot(np.dot(S, x))
    print(S)
    # Define the Lagrange multipliers.
    (lambda_, constraint) = mp1.NewSosPolynomial(Variables(x), 4)
    xd = mp1.NewFreePolynomial(Variables(x), 2)
    yd = mp1.NewFreePolynomial(Variables(x), 2)
    thetd = mp1.NewFreePolynomial(Variables(x), 2)
    u = np.vstack((xd, yd))
    u = np.vstack((u, thetd))
    Vdot = Jacobian([V], x).dot(plant(x, u))[0]
    mp1.AddSosConstraint(-Vdot + lambda_.ToExpression() * (V - rho_guess))
    result = mp1.Solve()
    # print(type(lambda_).__dict__.keys())
    print(type(lambda_.decision_variables()).__dict__.keys())
    L = [mp1.GetSolution(var) for var in lambda_.decision_variables()]
    # print(lambda_.monomial_to_coefficient_map())
    return L, u
Example #7
    def MILP_compute_traj(self,
                          obst_idx,
                          x_out,
                          y_out,
                          dx,
                          dy,
                          pose_initial=[0., 0.]):
        '''
        Find trajectory with MILP.
        Outputs trajectory (waypoints) and the x/y obstacle margins.
        '''
        mp = MathematicalProgram()
        N = 8
        k = 0
        # define state traj
        st = mp.NewContinuousVariables(2, "state_%d" % k)
        # binary variables for obstacles constraint
        c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
        obs = c
        states = st
        for k in range(1, N):
            st = mp.NewContinuousVariables(2, "state_%d" % k)
            states = np.vstack((states, st))
            c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
            obs = np.vstack((obs, c))
        st = mp.NewContinuousVariables(2, "state_%d" % (N + 1))
        states = np.vstack((states, st))
        c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
        obs = np.vstack((obs, c))
        # variables encoding max x y dist from obstacle
        x_margin = mp.NewContinuousVariables(1, "x_margin")
        y_margin = mp.NewContinuousVariables(1, "y_margin")
        ### define cost
        for i in range(N):
            mp.AddLinearCost(-states[i, 0])  # go as far forward as possible
        mp.AddLinearCost(-states[-1, 0])
        mp.AddLinearCost(-x_margin[0])
        mp.AddLinearCost(-y_margin[0])
        # bound x y margin so it doesn't explode
        mp.AddLinearConstraint(x_margin[0] <= 3.)
        mp.AddLinearConstraint(y_margin[0] <= 3.)
        # x and y margins are non-negative and at least the robot radius
        mp.AddLinearConstraint(x_margin[0] >= 0.05)
        mp.AddLinearConstraint(y_margin[0] >= 0.05)
        M = 1000  # big-M constant for the integer obstacle constraints
        # state constraint
        for i in range(2):  # initial state constraint x y
            mp.AddLinearConstraint(states[0, i] <= pose_initial[i])
            mp.AddLinearConstraint(states[0, i] >= pose_initial[i])
        for i in range(N):
            mp.AddQuadraticCost((states[i + 1, 1] - states[i, 1])**2)
            mp.AddLinearConstraint(states[i + 1, 0] <= states[i, 0] + dx)
            mp.AddLinearConstraint(states[i + 1, 0] >= states[i, 0] - dx)
            mp.AddLinearConstraint(states[i + 1, 1] <= states[i, 1] + dy)
            mp.AddLinearConstraint(states[i + 1, 1] >= states[i, 1] - dy)
            # obstacle constraint
            for j in range(self.ang_discret - 1):
                mp.AddLinearConstraint(sum(obs[i, 4 * j:4 * j + 4]) <= 3)
                ang_min = self.angles[j]  # lower angle bound of obstacle
                ang_max = self.angles[j + 1]  # higher angle bound of obstacle
                if int(obst_idx[j]) < (self.rng_discret -
                                       1):  # less than max range measured
                    rng_min = self.ranges[int(
                        obst_idx[j])]  # where the obstacle is at this angle
                    rng_max = self.ranges[int(obst_idx[j] + 1)]
                    mp.AddLinearConstraint(
                        states[i, 0] <= rng_min - x_margin[0] +
                        M * obs[i, 4 * j])  # xi <= xobs,low + M*c
                    mp.AddLinearConstraint(
                        states[i, 0] >= rng_max + x_margin[0] -
                        M * obs[i, 4 * j + 1])  # xi >= xobs,high - M*c
                    mp.AddLinearConstraint(
                        states[i, 1] <=
                        states[i, 0] * np.tan(ang_min) - y_margin[0] +
                        M * obs[i, 4 * j + 2])  # yi <= xi*tan(ang,min) + M*c
                    mp.AddLinearConstraint(
                        states[i, 1] >=
                        states[i, 0] * np.tan(ang_max) + y_margin[0] -
                        M * obs[i, 4 * j + 3])  # yi >= xi*tan(ang,max) - M*c
        # obstacle constraint for last state
        for j in range(self.ang_discret - 1):
            mp.AddLinearConstraint(sum(obs[N, 4 * j:4 * j + 4]) <= 3)
            ang_min = self.angles[j]  # lower angle bound of obstacle
            ang_max = self.angles[j + 1]  # higher angle bound of obstacle
            if int(obst_idx[j]) < (self.rng_discret -
                                   1):  # less than max range measured
                rng_min = self.ranges[int(
                    obst_idx[j])]  # where the obstacle is at this angle
                rng_max = self.ranges[int(obst_idx[j] + 1)]
                mp.AddLinearConstraint(
                    states[N, 0] <= rng_min - x_margin[0] +
                    M * obs[N, 4 * j])  # xi <= xobs,low + M*c
                mp.AddLinearConstraint(
                    states[N, 0] >= rng_max + x_margin[0] -
                    M * obs[N, 4 * j + 1])  # xi >= xobs,high - M*c
                mp.AddLinearConstraint(
                    states[N,
                           1] <= states[N, 0] * np.tan(ang_min) - y_margin[0] +
                    M * obs[N, 4 * j + 2])  # yi <= xi*tan(ang,min) + M*c
                mp.AddLinearConstraint(
                    states[N,
                           1] >= states[N, 0] * np.tan(ang_max) + y_margin[0] -
                    M * obs[N, 4 * j + 3])  # yi >= xi*tan(ang,max) - M*c

        mp.Solve()

        trajectory = mp.GetSolution(states)
        xm = mp.GetSolution(x_margin)
        ym = mp.GetSolution(y_margin)
        x_out[:] = trajectory[:, 0]
        y_out[:] = trajectory[:, 1]
        return trajectory, xm[0], ym[0]
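The obstacle constraints above use a standard big-M disjunction: each of the four inequalities per obstacle cell is relaxed by M times a binary, and sum(obs[...]) <= 3 forces at least one binary to zero, so the state must escape the cell on at least one side. A tiny numeric illustration with hypothetical values:

# With M = 1000, the constraint  x <= x_obs_low + M*c  is binding when c = 0
# and effectively disappears when c = 1 (hypothetical numbers).
M = 1000.
x_obs_low = 2.0
x = 1.5
assert x <= x_obs_low + M * 0   # c = 0: the state must stay below the obstacle
assert x <= x_obs_low + M * 1   # c = 1: trivially satisfied, constraint "off"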
Example #8
    def ComputeControlInput(self, x, t):
        # Set up things you might want...
        q = x[0:self.nq]
        v = x[self.nq:]

        kinsol = self.hand.doKinematics(x)
        M = self.hand.massMatrix(kinsol)
        C = self.hand.dynamicsBiasTerm(kinsol, {}, None)
        B = self.hand.B

        # Assume grasp points are achieved, and calculate
        # contact jacobian at those points, as well as the current
        # grasp points and normals (in WORLD FRAME).
        grasp_points_world_now = self.transform_grasp_points_manipuland(q)
        grasp_normals_world_now = self.transform_grasp_normals_manipuland(q)
        J_manipuland = jacobian(
            self.transform_grasp_points_manipuland, q)
        ee_points_now = self.transform_grasp_points_fingers(q)
        J_manipulator = jacobian(
            self.transform_grasp_points_fingers, q)

        # The contact jacobian (Phi), for contact forces coming from
        # point contacts, describes how the movement of the contact point
        # maps into the joint coordinates of the robot. We can get this
        # by combining the jacobians derived from forward kinematics to each
        # of the two bodies that are in contact, evaluated at the contact point
        # in each body's frame.
        J_contact = J_manipuland - J_manipulator
        # Cut off the 3rd dimension, which should always be 0
        J_contact = J_contact[0:2, :, :]
        
        normals = grasp_normals_world_now[0:2, :].T

        # Given these, the manipulator equations enter as a linear constraint
        # M qdd + C = Bu + J_contact lambda
        # (the combined effects of lambda on the robot).
        # The list of grasp points, in manipuland frame, that we'll use.
        n_cf = len(self.grasp_points)
        # The evaluated desired manipuland posture.
        manipuland_qdes = self.GetDesiredObjectPosition(t)

        # The desired robot (arm) posture, calculated via inverse kinematics
        # to achieve the desired grasp points on the object in its current
        # target posture.
        qdes, info = self.ComputeTargetPosture(x, manipuland_qdes)
        if info != 1:
            if not self.shut_up:
                print "Warning: target posture IK solve got info %d " \
                      "when computing goal posture at least once during " \
                      "simulation. This means the grasp points was hard to " \
                      "achieve given the current object posture. This is " \
                      "occasionally OK, but indicates that your controller " \
                      "is probably struggling a little." % info
                self.shut_up = True

        # From here, it's up to you. Following the guidelines in the problem
        # set, implement a controller to achieve the specified goals.

        '''
        YOUR CODE HERE
        '''
        # This is not a proper orthogonal vector... but it somehow makes it
        # work, whereas the correct one does not.
        def ortho2(x):
            return np.array([x[0],-x[1]])
            #return np.array([x[1],-x[0]])

        prog = MathematicalProgram()
        
        dt = self.control_period
        
        u = prog.NewContinuousVariables(self.nu, "u")
        
        qdd = prog.NewContinuousVariables(q.shape[0], "qdd")
        for i in range(qdd.shape[0]):
            prog.AddLinearConstraint(qdd[i] <=  40)
            prog.AddLinearConstraint(qdd[i] >= -40)

            #prog.AddLinearConstraint(v[i] + qdd[i] * dt <=   80)
            #prog.AddLinearConstraint(v[i] - qdd[i] * dt >=  -80)
            
        beta = prog.NewContinuousVariables(n_cf, 2, "beta")
        #prog.AddQuadraticCost(0.1*np.sum(beta**2))
        for i in range(n_cf):
            prog.AddLinearConstraint(beta[i,0] >= 0)
            prog.AddLinearConstraint(beta[i,1] >= 0)
            prog.AddLinearConstraint(beta[i,0] <= 10)
            prog.AddLinearConstraint(beta[i,1] <= 10)
            
        c = np.zeros((n_cf,2,2))

        for i in range(n_cf):
            c[i,0] = normals[i] - self.mu * ortho2(normals[i])
            c[i,1] = normals[i] + self.mu * ortho2(normals[i])

        const = M.dot(qdd) + C - B.dot(u)  # manipulator-equation residual, constrained to 0 below

        for i in range(n_cf):
            lambda_i = c[i,0]*beta[i,0] + c[i,1]*beta[i,1]
            tmp = J_contact[:, i, :].T.dot(lambda_i)
            const -= tmp

        for i in range(const.shape[0]):
            prog.AddLinearConstraint(const[i] == 0.0)
        
        Kp = 1000
        Kd = 10
        
        #v2 = v + dt * qdd
        #q2 = q + v * dt + 0.5 * qdd * dt*dt
        
        v2 = v + dt * qdd
        q2 = q + (v+v2)/2 * dt #+ 0.5 * qdd * dt*dt

        #v2 = v + dt * qdd
        #q2 = q + v2 * dt
        prog.AddQuadraticCost(Kp * np.sum((qdes - q2) ** 2) + Kd * np.sum(v2**2))
        
        result = prog.Solve()
        u = prog.GetSolution(u)
        return u
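In Example #8 each contact force is parameterized as lambda_i = beta[i,0]*c[i,0] + beta[i,1]*c[i,1] with beta >= 0, i.e. a nonnegative combination of the two friction-cone edge vectors. A small numpy check of that parameterization, using the conventional 90-degree tangent rather than the ortho2 variant in the code above; the normal, mu, and beta values are hypothetical:

import numpy as np

mu = 0.5
n = np.array([0., 1.])                  # contact normal
t = np.array([n[1], -n[0]])             # tangent (90-degree rotation of n)
c0 = n - mu * t                         # one edge of the friction cone
c1 = n + mu * t                         # the other edge
beta = np.array([0.3, 0.7])             # nonnegative edge weights
lam = beta[0] * c0 + beta[1] * c1       # resulting contact force
# the tangential component never exceeds mu times the normal component
assert abs(lam.dot(t)) <= mu * lam.dot(n) + 1e-12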
Example #9
    def PlanGraspPoints(self):
        # First, extract the bounding geometry of the object.
        # Generally, our geometry is coming from 3d models, so
        # we have to do some legwork to extract 2D geometry. For
        # the examples we'll use in this set, we'll assume
        # that extracting the convex hull of the first visual element
        # is a good representation of the object geometry. (This is
        # not great! But it'll do the job for us, since we're going
        # to experiment with only simple objects.)
        kinsol = self.hand.doKinematics(self.x_nom)
        self.manipuland_link_index = \
            self.hand.FindBody(self.manipuland_link_name).get_body_index()
        body = self.hand.get_body(self.manipuland_link_index)
        # For projecting into XY plane
        Tview = np.array([[1., 0., 0., 0.],
                          [0., 1., 0., 0.],
                          [0., 0., 0., 1.]])
        all_points = ExtractPlanarObjectGeometryConvexHull(body, Tview)

        # For later use: precompute the fingertip positions of the
        # robot in the nominal posture.
        nominal_fingertip_points = np.empty((2, self.num_fingers))
        for i in range(self.num_fingers):
            nominal_fingertip_points[:, i] = self.hand.transformPoints(
                kinsol, self.fingertip_position, 
                self.finger_link_indices[i], 0)[0:2, 0]

        # Search for an optimal grasp with N points
        random.seed(42)
        np.random.seed(42)

        def get_random_point_and_normal_on_surface(all_points):
            num_points = all_points.shape[1]
            i = random.choice(range(num_points-1))
            first_point = np.asarray([all_points[0][i], all_points[1][i]])
            second_point = np.asarray([all_points[0][i+1], all_points[1][i+1]])
            first_to_second = second_point - first_point
            # Clip to avoid interpolating close to a corner
            interpolate_param = np.clip(np.random.rand(), 0.2, 0.8)
            rand_point = first_point + interpolate_param*first_to_second

            normal = np.array([-first_to_second[1], first_to_second[0]]) \
                / np.linalg.norm(first_to_second)
            return rand_point, normal

        best_conv_volume = 0
        best_points = []
        best_normals = []
        best_finger_assignments = []
        for i in range(self.n_grasp_search_iters):
            grasp_points = []
            normals = []
            for j in range(self.num_fingers):
                point, normal = \
                    get_random_point_and_normal_on_surface(all_points)
                # check for duplicates or close points -- fingertip
                # radius is 0.2, so require twice that margin to avoid
                # intersecting fingertips.
                num_rejected = 0
                while min([1.0] + [np.linalg.norm(grasp_point - point, 2)
                                   for grasp_point in grasp_points]) <= 0.4:
                    point, normal = \
                        get_random_point_and_normal_on_surface(all_points)
                    num_rejected += 1
                    if num_rejected > 10000:
                        print "Rejected 10000 points in a row due to crowding." \
                              " Your object is a bit too small for your number of" \
                              " fingers."
                        break
                grasp_points.append(point)
                normals.append(normal)
            if achieves_force_closure(grasp_points, normals, self.mu):
                # Test #1: Rank in terms of convex hull volume of grasp points
                volume = compute_convex_hull_volume(grasp_points)
                if volume < best_conv_volume:
                    continue
                    
                # Test #2: Does IK work for this point?
                # TODO(gizatt) Make IK call accept these things
                # as arguments rather than passing them through the object
                # state.
                self.grasp_points = grasp_points
                self.grasp_normals = normals

                # Pick optimal finger assignment that
                # minimizes distance between fingertips in the
                # nominal posture, and the chosen grasp points
                grasp_points_world = self.transform_grasp_points_manipuland(
                    self.x_nom)[0:2, :]

                prog = MathematicalProgram()
                # We'd *really* like these to be binary variables,
                # but unfortunately don't have a MIP solver available in the
                # course docker container. Instead, we'll solve an LP,
                # and round the result to the nearest feasible integer
                # solutions. Intuitively, this LP should probably reach its
                # optimal value at an extreme point (where the variables
                # all take integer value). I've not yet observed this
                # not occurring in practice!
                assignment_vars = prog.NewContinuousVariables(
                    self.num_fingers, self.num_fingers, "assignment")
                for grasp_i in range(self.num_fingers):
                    # Every row and column of assignment vars sum to one --
                    # each finger matches to one grasp position
                    prog.AddLinearConstraint(
                        np.sum(assignment_vars[:, grasp_i]) == 1.)
                    prog.AddLinearConstraint(
                        np.sum(assignment_vars[grasp_i, :]) == 1.)
                    for finger_i in range(self.num_fingers):
                        # If this grasp assignment is active,
                        # add a cost equal to the distance between
                        # nominal pose and grasp position
                        prog.AddLinearCost(
                            assignment_vars[grasp_i, finger_i] *
                            np.linalg.norm(
                                grasp_points_world[:, grasp_i] -
                                nominal_fingertip_points[:, finger_i]))
                        prog.AddBoundingBoxConstraint(
                            0., 1., assignment_vars[grasp_i, finger_i])
                result = prog.Solve()
                assignments = prog.GetSolution(assignment_vars)
                # Round assignments to nearest feasible set
                claimed = [False]*self.num_fingers
                for grasp_i in range(self.num_fingers):
                    order = np.argsort(assignments[grasp_i, :])
                    fill_i = self.num_fingers - 1
                    while claimed[order[fill_i]] is not False:
                        fill_i -= 1
                    if fill_i < 0:
                        raise Exception("Finger association failed. "
                                        "Horrible bug. Tell Greg")
                    assignments[grasp_i, :] *= 0.
                    assignments[grasp_i, order[fill_i]] = 1.
                    claimed[order[fill_i]] = True

                # Populate actual assignments
                self.grasp_finger_assignments = []
                for grasp_i in range(self.num_fingers):
                    for finger_i in range(self.num_fingers):
                        if assignments[grasp_i, finger_i] == 1.:
                            self.grasp_finger_assignments.append(
                                (finger_i, 
                                 np.array(self.fingertip_position)))
                            continue

                qinit, info = self.ComputeTargetPosture(
                                self.x_nom, self.x_nom[(self.nq-3):self.nq],
                                backoff_distance=0.2)
                if info != 1:
                    continue

                best_conv_volume = volume
                best_points = grasp_points
                best_normals = normals
                best_finger_assignments = self.grasp_finger_assignments

        if len(best_points) == 0:
            print "After %d attempts, couldn't find a good grasp "\
                  "for this object." % self.n_grasp_search_iters
            print "Proceeding with a horrible random guess."
            best_points = [np.random.uniform(-1., 1., 2)
                           for i in range(self.num_fingers)]
            best_normals = [np.random.uniform(-1., 1., 2)
                            for i in range(self.num_fingers)]
            best_finger_assignments = [(i, self.fingertip_position)
                                       for i in range(self.num_fingers)]

        self.grasp_points = best_points
        self.grasp_normals = best_normals
        self.grasp_finger_assignments = best_finger_assignments
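As described in the comments above, the finger-assignment LP is rounded to a permutation matrix after solving: each row claims the highest-valued column that has not been claimed yet. A tiny standalone illustration of that rounding step on a hypothetical fractional solution, mirroring the argsort-based loop in PlanGraspPoints:

import numpy as np

assignments = np.array([[0.6, 0.4],
                        [0.7, 0.3]])     # hypothetical LP solution
claimed = [False, False]
for grasp_i in range(2):
    order = np.argsort(assignments[grasp_i, :])
    fill_i = len(order) - 1
    while claimed[order[fill_i]]:        # skip columns already claimed
        fill_i -= 1
    assignments[grasp_i, :] = 0.
    assignments[grasp_i, order[fill_i]] = 1.
    claimed[order[fill_i]] = True
print(assignments)                       # [[1. 0.] [0. 1.]]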
Example #10
    def compute_trajectory(self,
                           obst_idx,
                           x_out,
                           y_out,
                           ux_out,
                           uy_out,
                           pose_initial=[0., 0., 0., 0.],
                           dt=0.05):
        '''
        Find trajectory with MILP.
        The inputs u are the velocities (xd, yd).
        dt = 0.05 corresponds to a rate of 20 Hz.
        '''
        mp = MathematicalProgram()
        N = 30
        k = 0
        # define input trajectory and state traj
        u = mp.NewContinuousVariables(2, "u_%d" % k)  # xd yd
        input_trajectory = u
        st = mp.NewContinuousVariables(4, "state_%d" % k)
        # binary variables for obstacles constraint
        c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
        obs = c
        states = st
        for k in range(1, N):
            u = mp.NewContinuousVariables(2, "u_%d" % k)
            input_trajectory = np.vstack((input_trajectory, u))
            st = mp.NewContinuousVariables(4, "state_%d" % k)
            states = np.vstack((states, st))
            c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
            obs = np.vstack((obs, c))
        st = mp.NewContinuousVariables(4, "state_%d" % (N + 1))
        states = np.vstack((states, st))
        c = mp.NewBinaryVariables(4 * self.ang_discret, "c_%d" % k)
        obs = np.vstack((obs, c))
        ### define cost
        mp.AddLinearCost(100 *
                         (-states[-1, 0]))  # go as far forward as possible
        # mp.AddQuadraticCost(states[-1,1]*states[-1,1])
        # time constraint
        M = 1000  # big-M constant for the obstacle constraints
        # state constraint
        for i in range(2):  # initial state constraint on x, y
            mp.AddLinearConstraint(states[0, i] <= pose_initial[i])
            mp.AddLinearConstraint(states[0, i] >= pose_initial[i])
        for i in range(2):  # initial state constraint xd yd yawd
            mp.AddLinearConstraint(states[0, i] <= pose_initial[2 + i] + 1)
            mp.AddLinearConstraint(states[0, i] >= pose_initial[2 + i] - 1)
        for i in range(N):
            # state update according to dynamics
            state_next = self.quad_dynamics(states[i, :],
                                            input_trajectory[i, :], dt)
            for j in range(4):
                mp.AddLinearConstraint(states[i + 1, j] <= state_next[j])
                mp.AddLinearConstraint(states[i + 1, j] >= state_next[j])
            # obstacle constraint
            for j in range(self.ang_discret - 1):
                mp.AddLinearConstraint(sum(obs[i, 4 * j:4 * j + 4]) <= 3)
                ang_min = self.angles[j]  # lower angle bound of obstacle
                ang_max = self.angles[j + 1]  # higher angle bound of obstacle
                if int(obst_idx[j]) < (self.rng_discret -
                                       1):  # less than max range measured
                    rng_min = self.ranges[int(
                        obst_idx[j])]  # where the obstacle is at this angle
                    rng_max = self.ranges[int(obst_idx[j] + 1)]
                    mp.AddLinearConstraint(
                        states[i, 0] <= rng_min - 0.05 +
                        M * obs[i, 4 * j])  # xi <= xobs,low + M*c
                    mp.AddLinearConstraint(
                        states[i, 0] >= rng_max + 0.005 -
                        M * obs[i, 4 * j + 1])  # xi >= xobs,high - M*c
                    mp.AddLinearConstraint(
                        states[i, 1] <= states[i, 0] * np.tan(ang_min) - 0.05 +
                        M * obs[i, 4 * j + 2])  # yi <= xi*tan(ang,min) + M*c
                    mp.AddLinearConstraint(
                        states[i, 1] >= states[i, 0] * np.tan(ang_max) + 0.05 -
                        M * obs[i, 4 * j + 3])  # yi >= xi*tan(ang,max) - M*c
            # environment constraint: don't leave the field of view
            mp.AddLinearConstraint(
                states[i, 1] >= states[i, 0] * np.tan(-self.theta))
            mp.AddLinearConstraint(
                states[i, 1] <= states[i, 0] * np.tan(self.theta))
            # bound the inputs
            # mp.AddConstraint(input_trajectory[i,:].dot(input_trajectory[i,:]) <= 2.5*2.5)  # doesn't work with the mixed-integer solver
            mp.AddLinearConstraint(input_trajectory[i, 0] <= 2.5)
            mp.AddLinearConstraint(input_trajectory[i, 0] >= -2.5)
            mp.AddLinearConstraint(input_trajectory[i, 1] <= 0.5)
            mp.AddLinearConstraint(input_trajectory[i, 1] >= -0.5)

        mp.Solve()

        input_trajectory = mp.GetSolution(input_trajectory)
        trajectory = mp.GetSolution(states)
        x_out[:] = trajectory[:, 0]
        y_out[:] = trajectory[:, 1]
        ux_out[:] = input_trajectory[:, 0]
        uy_out[:] = input_trajectory[:, 1]
        return trajectory, input_trajectory
Example #11
def quadratic_program(H, f, A, b, C=None, d=None, tol=1.e-5, **kwargs):
    """
    Solves the strictly convex (H > 0) quadratic program min .5 x' H x + f' x s.t. A x <= b, C x  = d.

    Arguments
    ----------
    H : numpy.ndarray
        Positive definite Hessian of the cost function.
    f : numpy.ndarray
        Gradient of the cost function.
    A : numpy.ndarray
        Left-hand side of the inequality constraints.
    b : numpy.ndarray
        Right-hand side of the inequality constraints.
    C : numpy.ndarray
        Left-hand side of the equality constraints.
    d : numpy.ndarray
        Right-hand side of the equality constraints.
    tol : float
        Maximum value of a residual of an inequality to consider the related constraint active.

    Returns
    ----------
    sol : dict
        Dictionary with the solution of the QP.

        Fields
        ----------
        min : float
            Minimum of the QP (None if the problem is unfeasible).
        argmin : numpy.ndarray
            Argument that minimizes the QP (None if the problem is unfeasible).
        active_set : list of int
            Indices of the active inequalities {i | A_i argmin = b} (None if the problem is unfeasible).
        multiplier_inequality : numpy.ndarray
            Lagrange multipliers for the inequality constraints (None if the problem is unfeasible).
        multiplier_equality : numpy.ndarray
            Lagrange multipliers for the equality constraints (None if the problem is unfeasible or without equality constraints).
    """

    # check equalities
    if (C is None) != (d is None):
        raise ValueError('missing C or d.')

    # problem size
    n_ineq, n_x = A.shape
    if C is not None:
        n_eq = C.shape[0]
    else:
        n_eq = 0

    # build program
    prog = MathematicalProgram()
    x = prog.NewContinuousVariables(n_x)
    [prog.AddLinearConstraint(A[i].dot(x) <= b[i]) for i in range(n_ineq)]
    [prog.AddLinearConstraint(C[i].dot(x) == d[i]) for i in range(n_eq)]
    inequalities = []
    prog.AddQuadraticCost(.5*x.dot(H).dot(x) + f.dot(x))

    # solve
    solver = GurobiSolver()
    prog.SetSolverOption(solver.solver_type(), 'OutputFlag', 0)
    [prog.SetSolverOption(solver.solver_type(), parameter, value) for parameter, value in kwargs.items()]
    result = prog.Solve()

    # initialize output
    sol = {
        'min': None,
        'argmin': None,
        'active_set': None,
        'multiplier_inequality': None,
        'multiplier_equality': None
    }

    if result == SolutionResult.kSolutionFound:
        sol['argmin'] = prog.GetSolution(x)
        sol['min'] = .5*sol['argmin'].dot(H).dot(sol['argmin']) + f.dot(sol['argmin'])
        sol['active_set'] = np.where(A.dot(sol['argmin']) - b > -tol)[0].tolist()

        # retrieve multipliers through KKT conditions
        lhs = A[sol['active_set']]
        rhs = b[sol['active_set']]
        if n_eq > 0:
            lhs = np.vstack((lhs, C))
            rhs = np.concatenate((rhs, d))
        H_inv = np.linalg.inv(H)
        M = lhs.dot(H_inv).dot(lhs.T)
        m = - np.linalg.inv(M).dot(lhs.dot(H_inv).dot(f) + rhs)
        sol['multiplier_inequality'] = np.zeros(n_ineq)
        for i, j in enumerate(sol['active_set']):
            sol['multiplier_inequality'][j] = m[i]
        if n_eq > 0:
            sol['multiplier_equality'] = m[len(sol['active_set']):]

    return sol
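A hypothetical call to quadratic_program (the function hard-codes GurobiSolver, so this assumes a Gurobi-enabled pydrake build): minimize .5*(x0^2 + x1^2) - x0 - x1 subject to x0 <= 0.5. The unconstrained minimizer is (1, 1), so the inequality is active at the optimum.

import numpy as np

H = np.eye(2)
f = np.array([-1., -1.])
A = np.array([[1., 0.]])
b = np.array([0.5])
sol = quadratic_program(H, f, A, b)
# expected: argmin ~ [0.5, 1.], min ~ -0.875, active_set == [0],
# multiplier_inequality ~ [0.5]
print(sol['argmin'], sol['min'], sol['active_set'], sol['multiplier_inequality'])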
Example #12
phi0 = -36332.36234347365

print("Qp : ", Quadratic_Positive_def)
print("Qp det: ", QP_det)

# Quadratic cost : u^TQu + c^Tu
CLF_QP_cost_v_effective = np.dot(u_var, np.dot(Quadratic_Positive_def,
                                               u_var)) + np.dot(c, u_var)

prog.AddQuadraticCost(CLF_QP_cost_v_effective)
prog.AddConstraint(np.dot(d, u_var) + phi0 <= 0)

solver = IpoptSolver()

prog.Solve()
# solver.Solve(prog)
print("Optimal u : ", prog.GetSolution(u_var))
u_CLF_QP = prog.GetSolution(u_var)

# ('A_fl:', )
# ('A_fl_det:', 137180180557.17741)
# ('Qp : ', array([[ 1.0000e+00, -1.5475e-13,  4.0035e-14,  3.7932e-15],
#        [-1.5475e-13,  5.7298e+07,  2.1803e+05, -3.3461e+06],
#        [ 4.0035e-14,  2.1803e+05,  6.2061e+07,  3.5442e+07],
#        [ 3.7932e-15, -3.3461e+06,  3.5442e+07,  2.5742e+07]]))
# ('Qp det: ', 1.8818401937699662e+22)
# ('c_QP', array([-8.3592e+00, -8.3708e+06, -5.1451e+05,  2.0752e+05]))
# ('phi0_exp: ', -36332.36234347365)
# ('d : ', array([5.0752e+01, 4.7343e+05, 8.4125e+05, 6.2668e+05]))
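Example #12 assumes that prog, u_var, Quadratic_Positive_def, c, d, and phi0 were built earlier in the script from the CLF-QP derivation. A minimal, purely illustrative sketch of that missing setup follows; the input dimension of 4 is inferred from the printed arrays above, and the placeholder values are not the real feedback-linearization quantities.

import numpy as np
from pydrake.all import MathematicalProgram, IpoptSolver

prog = MathematicalProgram()
u_var = prog.NewContinuousVariables(4, "u")

# Placeholders of matching shape; in the original these come from the
# CLF-QP derivation (cost Hessian, cost gradient, and the decrease condition).
Quadratic_Positive_def = np.eye(4)
c = np.zeros(4)
d = np.ones(4)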
Example #13
    def compute_opt_trajectory(self, state_initial, goal_func, verbose=True):
        '''
        nonlinear trajectory optimization to land drone starting at state_initial, on a vehicle target trajectory given by the goal_func

        :return: four return args separated by commas:

            trajectory, input_trajectory, time_array, fuel_consumption

            trajectory: a 2d array with N rows, and 6 columns. See simulate_states_over_time for more documentation.
            input_trajectory: a 2d array with N-1 rows, and 4 columns. See simulate_states_over_time for more documentation.
            time_array: an array with N rows.
            fuel_consumption: a scalar estimate of the fuel used along the trajectory.
        '''
        # initialize math program
        import time
        start_time = time.time()
        mp = MathematicalProgram()
        num_time_steps = 40

        booster = mp.NewContinuousVariables(3, "booster_0")
        booster_over_time = booster[np.newaxis, :]

        state = mp.NewContinuousVariables(6, "state_0")
        state_over_time = state[np.newaxis, :]

        dt = mp.NewContinuousVariables(1, "dt")

        for k in range(1, num_time_steps - 1):
            booster = mp.NewContinuousVariables(3, "booster_%d" % k)
            booster_over_time = np.vstack((booster_over_time, booster))
        for k in range(1, num_time_steps):
            state = mp.NewContinuousVariables(6, "state_%d" % k)
            state_over_time = np.vstack((state_over_time, state))

        goal_state = goal_func(dt[0] * 39)

        # calculate states over time
        for i in range(1, num_time_steps):
            sim_next_state = state_over_time[
                i - 1, :] + dt[0] * self.drone_dynamics(
                    state_over_time[i - 1, :], booster_over_time[i - 1, :])

            # add constraints to restrict the next state to the decision variables
            for j in range(6):
                mp.AddConstraint(sim_next_state[j] == state_over_time[i, j])

            # don't hit ground
            mp.AddLinearConstraint(state_over_time[i, 2] >= 0.05)

            # enforce that we must be thrusting within some constraints
            mp.AddLinearConstraint(booster_over_time[i - 1, 0] <= 5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 0] >= -5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 1] <= 5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 1] >= -5.0)

            # keep forces in a reasonable position
            mp.AddLinearConstraint(booster_over_time[i - 1, 2] >= 1.0)
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 0] <= booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 0] >= -booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 1] <= booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 1] >= -booster_over_time[i - 1, 2])

        # add constraints on initial state
        for i in range(6):
            mp.AddLinearConstraint(state_over_time[0, i] == state_initial[i])

        # add constraints on dt
        mp.AddLinearConstraint(dt[0] >= 0.001)

        # add constraints on end state

        # end goal velocity
        mp.AddConstraint(state_over_time[-1, 0] <= goal_state[0] + 0.01)
        mp.AddConstraint(state_over_time[-1, 0] >= goal_state[0] - 0.01)
        mp.AddConstraint(state_over_time[-1, 1] <= goal_state[1] + 0.01)
        mp.AddConstraint(state_over_time[-1, 1] >= goal_state[1] - 0.01)
        mp.AddConstraint(state_over_time[-1, 2] <= goal_state[2] + 0.01)
        mp.AddConstraint(state_over_time[-1, 2] >= goal_state[2] - 0.01)
        mp.AddConstraint(state_over_time[-1, 3] <= goal_state[3] + 0.01)
        mp.AddConstraint(state_over_time[-1, 3] >= goal_state[3] - 0.01)
        mp.AddConstraint(state_over_time[-1, 4] <= goal_state[4] + 0.01)
        mp.AddConstraint(state_over_time[-1, 4] >= goal_state[4] - 0.01)
        mp.AddConstraint(state_over_time[-1, 5] <= goal_state[5] + 0.01)
        mp.AddConstraint(state_over_time[-1, 5] >= goal_state[5] - 0.01)

        # add the cost function
        mp.AddQuadraticCost(
            0.01 * booster_over_time[:, 0].dot(booster_over_time[:, 0]))
        mp.AddQuadraticCost(
            0.01 * booster_over_time[:, 1].dot(booster_over_time[:, 1]))
        mp.AddQuadraticCost(
            0.01 * booster_over_time[:, 2].dot(booster_over_time[:, 2]))

        # add more penalty on dt because minimizing time turns out to be more important
        mp.AddQuadraticCost(10000 * dt[0] * dt[0])

        solved = mp.Solve()
        if verbose:
            print(solved)

        # extract
        booster_over_time = mp.GetSolution(booster_over_time)
        output_states = mp.GetSolution(state_over_time)
        dt = mp.GetSolution(dt)

        time_array = np.zeros(40)
        for i in range(40):
            time_array[i] = i * dt
        trajectory = self.simulate_states_over_time(state_initial, time_array,
                                                    booster_over_time)

        durations = time_array[1:] - time_array[:-1]
        fuel_consumption = (
            np.sum(booster_over_time[:len(time_array)]**2, axis=1) *
            durations).sum()

        if verbose:
            print('expected remaining fuel consumption', fuel_consumption)
            print("took %s seconds" % (time.time() - start_time))
            print('')

        return trajectory, booster_over_time, time_array, fuel_consumption
Example #14
def linear_program(f, A, b, C=None, d=None, tol=1.e-5):
    """
    Solves the linear program min_x f^T x s.t. A x <= b, C x = d.

    Arguments
    ----------
    f : numpy.ndarray
        Gradient of the cost function.
    A : numpy.ndarray
        Left-hand side of the inequality constraints.
    b : numpy.ndarray
        Right-hand side of the inequality constraints.
    C : numpy.ndarray
        Left-hand side of the equality constraints.
    d : numpy.ndarray
        Right-hand side of the equality constraints.
    tol : float
        Maximum value of a residual of an inequality to consider the related constraint active.

    Returns
    ----------
    sol : dict
        Dictionary with the solution of the LP.

        Fields
        ----------
        min : float
            Minimum of the LP (None if the problem is unfeasible or unbounded).
        argmin : numpy.ndarray
            Argument that minimizes the LP (None if the problem is unfeasible or unbounded).
        active_set : list of int
            Indices of the active inequalities {i | A_i argmin = b} (None if the problem is unfeasible or unbounded).
        multiplier_inequality : numpy.ndarray
            Lagrange multipliers for the inequality constraints (None if the problem is unfeasible or unbounded).
        multiplier_equality : numpy.ndarray
            Lagrange multipliers for the equality constraints (None if the problem is unfeasible or unbounded or without equality constraints).
    """

    # check equalities
    if (C is None) != (d is None):
        raise ValueError('missing C or d.')

    # problem size
    n_ineq, n_x = A.shape
    if C is not None:
        n_eq = C.shape[0]
    else:
        n_eq = 0

    # reshape inputs
    if len(f.shape) == 2:
        f = np.reshape(f, f.shape[0])

    # build program
    prog = MathematicalProgram()
    x = prog.NewContinuousVariables(n_x)
    inequalities = []
    for i in range(n_ineq):
        # drake raises a RuntimeError if x does not appear in the expression (e.g. 0 x <= 1)
        lhs = A[i, :] + 1.e-20 * np.random.rand(n_x)
        # in case the constraint is 0 x <= 0, the previous trick alone would end up adding the constraint x <= 0 to the program, hence the perturbation of the right-hand side too
        rhs = b[i] + 1.e-15 * np.random.rand(1)
        inequalities.append(prog.AddLinearConstraint(lhs.dot(x) <= rhs))
    for i in range(n_eq):
        prog.AddLinearConstraint(C[i, :].dot(x) == d[i])
    prog.AddLinearCost(f.dot(x))

    # solve
    solver = GurobiSolver()
    prog.SetSolverOption(solver.solver_type(), "OutputFlag", 0)
    result = prog.Solve()

    # initialize output
    sol = {
        'min': None,
        'argmin': None,
        'active_set': None,
        'multiplier_inequality': None,
        'multiplier_equality': None
    }

    if result == SolutionResult.kSolutionFound:
        sol['argmin'] = prog.GetSolution(x).reshape(n_x, 1)
        sol['min'] = f.dot(sol['argmin'])[0]
        sol['active_set'] = np.where(
            A.dot(sol['argmin']) - b > -tol)[0].tolist()

        # retrieve multipliers through KKT conditions
        M = A[sol['active_set'], :].T
        if n_eq > 0:
            M = np.hstack((M, C.T))
        m = np.linalg.pinv(M).dot(-f.reshape(n_x, 1))
        sol['multiplier_inequality'] = np.zeros((n_ineq, 1))
        for i, j in enumerate(sol['active_set']):
            sol['multiplier_inequality'][j, 0] = m[i, :]
        if n_eq > 0:
            sol['multiplier_equality'] = m[len(sol['active_set']):, :]

    return sol
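A hypothetical call to linear_program (again assuming a Gurobi-enabled pydrake build): minimize -x0 - x1 over the box 0 <= x <= 1, written as A x <= b, with b passed as a column vector to match the function's internal reshapes (an assumption from reading the code). The optimum sits at the corner (1, 1), where the first two inequalities are active with multipliers of about 1.

import numpy as np

f = np.array([-1., -1.])
A = np.vstack((np.eye(2), -np.eye(2)))
b = np.array([[1.], [1.], [0.], [0.]])   # column vector
sol = linear_program(f, A, b)
# expected: argmin ~ [[1.], [1.]], min ~ -2.0, active_set == [0, 1]
print(sol['argmin'].flatten(), sol['min'], sol['active_set'])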
Example #15
    def compute_trajectory(self,
                           state_initial,
                           goal_state,
                           flight_time,
                           exact=False,
                           verbose=True):
        '''
        nonlinear trajectory optimization to land the drone, starting at state_initial, at a goal_state, in a specified flight_time

        :return: three return args separated by commas:

            trajectory, input_trajectory, time_array

            trajectory: a 2d array with N rows, and 6 columns. See simulate_states_over_time for more documentation.
            input_trajectory: a 2d array with N-1 rows, and 4 columns. See simulate_states_over_time for more documentation.
            time_array: an array with N rows. 

        '''
        # initialize math program
        import time
        start_time = time.time()
        mp = MathematicalProgram()
        num_time_steps = int(min(40, flight_time / 0.05))
        dt = flight_time / num_time_steps
        time_array = np.arange(0.0, flight_time - 0.00001,
                               dt)  # hacky way to ensure it goes down
        num_time_steps = len(time_array)  # to ensure these are equal lengths
        flight_time = dt * num_time_steps

        if verbose:
            print('')
            print('solving problem with no guess')
            print('initial state', state_initial)
            print('goal state', goal_state)
            print('flight time', flight_time)
            print('num time steps', num_time_steps)
            print('exact traj', exact)
            print('dt', dt)

        booster = mp.NewContinuousVariables(3, "booster_0")
        booster_over_time = booster[np.newaxis, :]

        state = mp.NewContinuousVariables(6, "state_0")
        state_over_time = state[np.newaxis, :]

        for k in range(1, num_time_steps - 1):
            booster = mp.NewContinuousVariables(3, "booster_%d" % k)
            booster_over_time = np.vstack((booster_over_time, booster))
        for k in range(1, num_time_steps):
            state = mp.NewContinuousVariables(6, "state_%d" % k)
            state_over_time = np.vstack((state_over_time, state))

        # calculate states over time
        for i in range(1, len(time_array)):
            time_step = time_array[i] - time_array[i - 1]
            sim_next_state = state_over_time[
                i - 1, :] + time_step * self.drone_dynamics(
                    state_over_time[i - 1, :], booster_over_time[i - 1, :])

            # add constraints to restrict the next state to the decision variables
            for j in range(6):
                mp.AddLinearConstraint(sim_next_state[j] == state_over_time[i,
                                                                            j])

            # don't hit ground
            mp.AddLinearConstraint(state_over_time[i, 2] >= 0.05)

            # enforce that we must be thrusting within some constraints
            mp.AddLinearConstraint(booster_over_time[i - 1, 0] <= 5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 0] >= -5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 1] <= 5.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 1] >= -5.0)

            # keep forces in a reasonable position
            mp.AddLinearConstraint(booster_over_time[i - 1, 2] >= 1.0)
            mp.AddLinearConstraint(booster_over_time[i - 1, 2] <= 15.0)
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 0] <= booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 0] >= -booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 1] <= booster_over_time[i - 1, 2])
            mp.AddLinearConstraint(
                booster_over_time[i - 1, 1] >= -booster_over_time[i - 1, 2])

        # add constraints on initial state
        for i in range(6):
            mp.AddLinearConstraint(state_over_time[0, i] == state_initial[i])

        # add constraints on end state
        # 100 should be a lower constant...
        mp.AddLinearConstraint(
            state_over_time[-1, 0] <= goal_state[0] + flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 0] >= goal_state[0] - flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 1] <= goal_state[1] + flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 1] >= goal_state[1] - flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 2] <= goal_state[2] + flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 2] >= goal_state[2] - flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 3] <= goal_state[3] + flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 3] >= goal_state[3] - flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 4] <= goal_state[4] + flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 4] >= goal_state[4] - flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 5] <= goal_state[5] + flight_time / 100.)
        mp.AddLinearConstraint(
            state_over_time[-1, 5] >= goal_state[5] - flight_time / 100.)

        # add the cost function
        mp.AddQuadraticCost(
            10. * booster_over_time[:, 0].dot(booster_over_time[:, 0]))
        mp.AddQuadraticCost(
            10. * booster_over_time[:, 1].dot(booster_over_time[:, 1]))
        mp.AddQuadraticCost(
            10. * booster_over_time[:, 2].dot(booster_over_time[:, 2]))

        for i in range(1, len(time_array) - 1):
            cost_multiplier = np.exp(
                4.5 * i / (len(time_array) -
                           1))  # exp starting at 1 and going to around 90
            # penalize difference_in_state
            dist_to_goal_pos = goal_state[:3] - state_over_time[i - 1, :3]
            mp.AddQuadraticCost(cost_multiplier *
                                dist_to_goal_pos.dot(dist_to_goal_pos))
            # penalize difference in velocity
            if exact:
                dist_to_goal_vel = goal_state[-3:] - state_over_time[i - 1,
                                                                     -3:]
                mp.AddQuadraticCost(cost_multiplier / 2. *
                                    dist_to_goal_vel.dot(dist_to_goal_vel))
            else:
                dist_to_goal_vel = goal_state[-3:] - state_over_time[i - 1,
                                                                     -3:]
                mp.AddQuadraticCost(cost_multiplier / 6. *
                                    dist_to_goal_vel.dot(dist_to_goal_vel))

        solved = mp.Solve()
        if verbose:
            print(solved)

        # extract
        booster_over_time = mp.GetSolution(booster_over_time)

        output_states = mp.GetSolution(state_over_time)

        durations = time_array[1:] - time_array[:-1]
        fuel_consumption = (
            np.sum(booster_over_time[:len(time_array)]**2, axis=1) *
            durations).sum()
        if verbose:
            print('expected remaining fuel consumption', fuel_consumption)
            print("took %s seconds" % (time.time() - start_time))
            print('goal state', goal_state)
            print('end state', output_states[-1])
            print('')

        return output_states, booster_over_time, time_array
Example #16
0
    def _DoCalcDiscreteVariableUpdates(self, context, events, discrete_state):
        # Call base method to ensure we do not get recursion.
        # (This makes sure relevant event handlers get called.)
        LeafSystem._DoCalcDiscreteVariableUpdates(self, context, events,
                                                  discrete_state)

        new_control_input = discrete_state. \
            get_mutable_vector().get_mutable_value()
        x = self.EvalVectorInput(
            context, self.robot_state_input_port.get_index()).get_value()
        setpoint = self.EvalAbstractInput(
            context, self.setpoint_input_port.get_index()).get_value()

        q_full = x[:self.nq]
        v_full = x[self.nq:]
        q = x[self.controlled_inds]
        v = x[self.nq:][self.controlled_inds]

        kinsol = self.rbt.doKinematics(q_full, v_full)

        M_full = self.rbt.massMatrix(kinsol)
        C = self.rbt.dynamicsBiasTerm(kinsol, {}, None)[self.controlled_inds]
        # Python slicing doesn't work in 2D, so do it row-by-row
        M = np.zeros((self.nq_reduced, self.nq_reduced))
        for k in range(self.nq_reduced):
            M[:, k] = M_full[self.controlled_inds, k]

        # Pick a qdd that results in minimum deviation from the desired
        # end effector pose (according to the end effector frame's jacobian
        # at the current state)
        # v_next = v + control_period * qdd
        # q_next = q + control_period * (v + v_next) / 2.
        # ee_v = J*v
        # ee_p = from forward kinematics
        # ee_v_next = J*v_next
        # ee_p_next = ee_p + control_period * (ee_v + ee_v_next) / 2.
        # min  u and qdd
        #        || q_next - q_des ||_Kq
        #     +  || v_next - v_des ||_Kv
        #     +  || qdd ||_Ka
        #     +  || Kee_v - ee_v_next ||_Kee_pt
        #     +  || Kee_pt - ee_p_next ||_Kee_v
        #     +  the messily-implemented angular ones?
        # s.t. M qdd + C = B u
        # (no contact modeling for now)
        prog = MathematicalProgram()
        qdd = prog.NewContinuousVariables(self.nq_reduced, "qdd")
        u = prog.NewContinuousVariables(self.nu, "u")

        prog.AddQuadraticCost(qdd.T.dot(setpoint.Ka).dot(qdd))

        v_next = v + self.control_period * qdd
        q_next = q + self.control_period * (v + v_next) / 2.
        if setpoint.v_des is not None:
            v_err = setpoint.v_des - v_next
            prog.AddQuadraticCost(v_err.T.dot(setpoint.Kv).dot(v_err))
        if setpoint.q_des is not None:
            q_err = setpoint.q_des - q_next
            prog.AddQuadraticCost(q_err.T.dot(setpoint.Kq).dot(q_err))

        if setpoint.ee_frame is not None and setpoint.ee_pt is not None:
            # Convert x to autodiff for Jacobians computation
            q_full_ad = np.empty(self.nq, dtype=AutoDiffXd)
            for i in range(self.nq):
                der = np.zeros(self.nq)
                der[i] = 1
                q_full_ad.flat[i] = AutoDiffXd(q_full.flat[i], der)
            kinsol_ad = self.rbt.doKinematics(q_full_ad)

            tf_ad = self.rbt.relativeTransform(kinsol_ad, 0, setpoint.ee_frame)

            # Compute errors in EE pt position and velocity (in world frame)
            ee_p_ad = tf_ad[0:3, 0:3].dot(setpoint.ee_pt) + tf_ad[0:3, 3]
            ee_p = np.hstack([y.value() for y in ee_p_ad])

            J_ee = np.vstack([y.derivatives()
                              for y in ee_p_ad]).reshape(3, self.nq)
            J_ee = J_ee[:, self.controlled_inds]

            ee_v = J_ee.dot(v)
            ee_v_next = J_ee.dot(v_next)
            ee_p_next = ee_p + self.control_period * (ee_v + ee_v_next) / 2.

            if setpoint.ee_pt_des is not None:
                ee_p_err = setpoint.ee_pt_des.reshape(
                    (3, 1)) - ee_p_next.reshape((3, 1))
                prog.AddQuadraticCost(
                    (ee_p_err.T.dot(setpoint.Kee_pt).dot(ee_p_err))[0, 0])
            if setpoint.ee_v_des is not None:
                ee_v_err = setpoint.ee_v_des.reshape(
                    (3, 1)) - ee_v_next.reshape((3, 1))
                prog.AddQuadraticCost(
                    (ee_v_err.T.dot(setpoint.Kee_v).dot(ee_v_err))[0, 0])

            # Also compute errors in EE cardinal vector directions vs targets in world frame
            for i, vec in enumerate(
                (setpoint.ee_x_des, setpoint.ee_y_des, setpoint.ee_z_des)):
                if vec is not None:
                    direction = np.zeros(3)
                    direction[i] = 1.
                    ee_dir_ad = tf_ad[0:3, 0:3].dot(direction)
                    ee_dir_p = np.hstack([y.value() for y in ee_dir_ad])
                    J_ee_dir = np.vstack([y.derivatives() for y in ee_dir_ad
                                          ]).reshape(3, self.nq)
                    J_ee_dir = J_ee_dir[:, self.controlled_inds]
                    ee_dir_v = J_ee_dir.dot(v)
                    ee_dir_v_next = J_ee_dir.dot(v_next)
                    ee_dir_p_next = ee_dir_p + self.control_period * (
                        ee_dir_v + ee_dir_v_next) / 2.
                    ee_dir_p_err = vec.reshape((3, 1)) - ee_dir_p_next.reshape(
                        (3, 1))
                    prog.AddQuadraticCost((ee_dir_p_err.T.dot(
                        setpoint.Kee_xyz).dot(ee_dir_p_err))[0, 0])
                    prog.AddQuadraticCost((ee_dir_v_next.T.dot(
                        setpoint.Kee_xyzd).dot(ee_dir_v_next)))

        LHS = np.dot(M, qdd) + C
        RHS = np.dot(self.B, u)
        for i in range(self.nq_reduced):
            prog.AddLinearConstraint(LHS[i] == RHS[i])

        result = prog.Solve()
        u = prog.GetSolution(u)
        new_control_input[:] = u
Example #17
    def solve(self, quad_start_q, quad_final_q, time_used):
        mp = MathematicalProgram()

        # We want to solve this for a certain number of knot points
        N = 40  # num knot points
        time_increment = time_used / (N + 1)
        dt = time_increment
        time_array = np.arange(0.0, time_used, time_increment)

        quad_u = mp.NewContinuousVariables(2, "u_0")
        quad_q = mp.NewContinuousVariables(6, "quad_q_0")

        for i in range(1, N):
            u = mp.NewContinuousVariables(2, "u_%d" % i)
            quad = mp.NewContinuousVariables(6, "quad_q_%d" % i)

            quad_u = np.vstack((quad_u, u))
            quad_q = np.vstack((quad_q, quad))

        for i in range(N):
            mp.AddLinearConstraint(quad_u[i][0] <= 3.0)  # force
            mp.AddLinearConstraint(quad_u[i][0] >= 0.0)  # force
            mp.AddLinearConstraint(quad_u[i][1] <= 10.0)  # torque
            mp.AddLinearConstraint(quad_u[i][1] >= -10.0)  # torque

            mp.AddLinearConstraint(quad_q[i][0] <= 1000.0)  # pos x
            mp.AddLinearConstraint(quad_q[i][0] >= -1000.0)
            mp.AddLinearConstraint(quad_q[i][1] <= 1000.0)  # pos y
            mp.AddLinearConstraint(quad_q[i][1] >= -1000.0)
            mp.AddLinearConstraint(
                quad_q[i][2] <= 60.0 * np.pi / 180.0)  # pos theta
            mp.AddLinearConstraint(quad_q[i][2] >= -60.0 * np.pi / 180.0)
            mp.AddLinearConstraint(quad_q[i][3] <= 1000.0)  # vel x
            mp.AddLinearConstraint(quad_q[i][3] >= -1000.0)
            mp.AddLinearConstraint(quad_q[i][4] <= 1000.0)  # vel y
            mp.AddLinearConstraint(quad_q[i][4] >= -1000.0)
            mp.AddLinearConstraint(quad_q[i][5] <= 10.0)  # vel theta
            mp.AddLinearConstraint(quad_q[i][5] >= -10.0)

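        # Tie consecutive knots together: each state must equal the one-step
        # integration of its predecessor, relaxed to a +/- eps band.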
        for i in range(1, N):
            quad_q_dyn_feasible = self.dynamics(quad_q[i - 1, :],
                                                quad_u[i - 1, :], dt)

            # Direct transcription constraints on states to dynamics
            for j in range(6):
                quad_state_err = (quad_q[i][j] - quad_q_dyn_feasible[j])
                eps = 0.001
                mp.AddConstraint(quad_state_err <= eps)
                mp.AddConstraint(quad_state_err >= -eps)

        # Initial and final quad and ball states
        for j in range(6):
            mp.AddLinearConstraint(quad_q[0][j] == quad_start_q[j])
            mp.AddLinearConstraint(quad_q[-1][j] == quad_final_q[j])

        # Quadratic cost on the control input
        R_force = 10.0
        R_torque = 100.0
        Q_quad_x = 100.0
        Q_quad_y = 100.0
        mp.AddQuadraticCost(R_force * quad_u[:, 0].dot(quad_u[:, 0]))
        mp.AddQuadraticCost(R_torque * quad_u[:, 1].dot(quad_u[:, 1]))
        mp.AddQuadraticCost(Q_quad_x * quad_q[:, 0].dot(quad_q[:, 0]))
        mp.AddQuadraticCost(Q_quad_y * quad_q[:, 1].dot(quad_q[:, 1]))

        # Solve the optimization
        print "Number of decision vars: ", mp.num_vars()

        # mp.SetSolverOption(SolverType.kSnopt, "Major iterations limit", 100000)

        print "Solve: ", mp.Solve()

        quad_traj = mp.GetSolution(quad_q)
        input_traj = mp.GetSolution(quad_u)

        return (quad_traj, input_traj, time_array)
def optimal_trajectory(joints,
                       start_position,
                       end_position,
                       signed_dist_fn,
                       nc,
                       time=10,
                       knot_points=100):
    assert len(joints) == len(start_position)
    assert len(joints) == len(end_position)

    h = float(time) / (knot_points - 1)  # knot spacing; float() guards against Python 2 integer division
    nq = len(joints)
    prog = MathematicalProgram()
    q_var = []
    v_var = []
    for ii in range(knot_points):
        q_var.append(prog.NewContinuousVariables(nq, "q[" + str(ii) + "]"))
        v_var.append(prog.NewContinuousVariables(nq, "v[" + str(ii) + "]"))

    # ---------------------------------------------------------------
    # Initial & Final Pose Constraints ------------------------------
    x_i = np.append(start_position, np.zeros(nq))
    x_i_vars = np.append(q_var[0], v_var[0])
    prog.AddLinearEqualityConstraint(np.eye(2 * nq), x_i, x_i_vars)
    tol = 0.01 * np.ones(2 * nq)
    x_f = np.append(end_position, np.zeros(nq))
    x_f_vars = np.append(q_var[-1], v_var[-1])
    prog.AddBoundingBoxConstraint(x_f - tol, x_f + tol, x_f_vars)

    # ---------------------------------------------------------------
    # Dynamics Constraints ------------------------------------------
    for ii in range(knot_points - 1):
        # Euler integration of the positions: q[ii+1] = q[ii] + h * v[ii]
        dyn_con1 = np.hstack((np.eye(nq), h * np.eye(nq), -np.eye(nq)))
        dyn_var1 = np.concatenate((q_var[ii], v_var[ii], q_var[ii + 1]))
        prog.AddLinearEqualityConstraint(dyn_con1, np.zeros(nq), dyn_var1)

    # ---------------------------------------------------------------
    # Joint Limit Constraints ---------------------------------------
    q_min = np.array([j.lower_limits() for j in joints])
    q_max = np.array([j.upper_limits() for j in joints])
    for ii in range(knot_points):
        prog.AddBoundingBoxConstraint(q_min, q_max, q_var[ii])

    # ---------------------------------------------------------------
    # Collision Constraints -----------------------------------------
    for ii in range(knot_points):
        prog.AddConstraint(signed_dist_fn, np.zeros(nc), 1e8 * np.ones(nc),
                           q_var[ii])

    # ---------------------------------------------------------------
    # Velocity Cost -------------------------------------------------
    for ii in range(knot_points):
        prog.AddQuadraticErrorCost(np.eye(nq), np.zeros(nq), v_var[ii])

    xi = np.array(start_position)
    xf = np.array(end_position)
    for ii in range(knot_points):
        prog.SetInitialGuess(q_var[ii],
                             ii * (xf - xi) / (knot_points - 1) + xi)
        prog.SetInitialGuess(v_var[ii], np.zeros(nq))

    result = prog.Solve()
    print prog.GetSolverId().name()
    if result != SolutionResult.kSolutionFound:
        print result
        return None
    q_path = []
    # print signed_dist_fn(prog.GetSolution(q_var[0]))
    for ii in range(knot_points):
        q_path.append(prog.GetSolution(q_var[ii]))
    return q_path
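A hypothetical, self-contained usage sketch for optimal_trajectory; FakeJoint and the
signed-distance stand-in below are illustrative only, and a real caller would pass robot
joint objects and a collision-distance callback instead:

import numpy as np


class FakeJoint(object):
    # Minimal stand-in exposing the limit accessors used above.
    def lower_limits(self):
        return -np.pi

    def upper_limits(self):
        return np.pi


def fake_signed_dist(q):
    # Always "collision free": one nonnegative distance value per configuration.
    return np.array([q.dot(q)])


joints = [FakeJoint(), FakeJoint()]
path = optimal_trajectory(joints, [0.0, 0.0], [0.5, -0.5],
                          fake_signed_dist, nc=1, time=5.0, knot_points=20)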
Example #19
0
    def compute_trajectory_to_other_world(self, state_initial, minimum_time, maximum_time):
        '''
        Your mission is to implement this function.

        A successful implementation of this function will compute a dynamically feasible trajectory
        which satisfies these criteria:
            - Efficiently conserve fuel
            - Reach "orbit" of the far right world
            - Approximately obey the dynamic constraints
            - Begin at the state_initial provided
            - Take no more than maximum_time, no less than minimum_time

        The above are defined more precisely in the provided notebook.

        Please note there are three return args.

        :param state_initial: numpy array of length 4, see rocket_dynamics for documentation
        :param minimum_time: float, minimum time allowed for trajectory
        :param maximum_time: float, maximum time allowed for trajectory

        :return: three return args separated by commas:

            trajectory, input_trajectory, time_array

            trajectory: a 2d array with N rows, and 4 columns. See simulate_states_over_time for more documentation.
            input_trajectory: a 2d array with N-1 rows, and 2 columns. See simulate_states_over_time for more documentation.
            time_array: an array with N rows. 

        '''
        from pydrake.all import MathematicalProgram
        import numpy as np
        import math
        N = 100
        t = maximum_time

        #input_trajectory = np.ones((N,2))*10.0
        #time_used = 100.0
        #time_array = np.arange(0.0, time_used, time_used/(N+1))
       
        #trajectory = self.simulate_states_over_time(state_initial, time_array, input_trajectory)
        #return trajectory, input_trajectory, time_array

        
        mp = MathematicalProgram()
        
        def addLinearEqual(x, y, length):
            for i in range(length):
                mp.AddLinearConstraint(x[i] == y[i])
                
                
        def addEpsilonEq(a, b, e):
            mp.AddConstraint(a <= b + e)
            mp.AddConstraint(a >= b - e)
            
        def addEqual(x, y, length, e):
            for i in range(length):
                addEpsilonEq(x[i], y[i], e)
                
        def worldTwoDistSquared(x):
            return np.sum((self.world_2_position - x) ** 2)
            
        inp_traj = mp.NewContinuousVariables(N-1, 2, "inp")
        traj = mp.NewContinuousVariables(N, 4, "traj")
        
        #mp.NewContinuousVariables(1, "t")
        #mp.AddLinearConstraint(t[0] == 10.0)
        time_step = t / N

        addLinearEqual(traj[0], state_initial, 4)
            
        for i in range(N-1):
            #todo add constraint forcing orbit to not occur until time t
            predicted = traj[i] + time_step * self.rocket_dynamics(traj[i], inp_traj[i])
            addEqual(traj[i+1], predicted, 4, 1e-8)
            mp.AddQuadraticCost(np.sum(inp_traj[i]**2))
            #mp.AddQuadraticCost(time_step*np.sum(inp_traj[i]**2))
            
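        # Terminal "orbit" conditions: end 0.5 away from world 2, with orbital
        # speed v^2 = G*M2/r and velocity perpendicular to the radius vector.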
        addEpsilonEq(worldTwoDistSquared(traj[-1][:2]), 0.5**2, 1e-8)
        velocity = traj[-1][2:] / time_step
        addEpsilonEq(velocity.dot(velocity), self.G*self.M2/0.5, 1e-8)
        addEpsilonEq(velocity.dot(traj[-1][:2] - self.world_2_position), 0, 1e-8)
        
        #mp.AddLinearConstraint(t == 100.0)
        #mp.AddLinearConstraint(t >= minimum_time)
        #mp.AddLinearConstraint(t <= maximum_time)
        
        print(mp)
        print(mp.Solve())
        time_used = t # mp.GetSolution(t)
        time_array = np.arange(0.0, time_used, time_used/N)
        
        trajectory = mp.GetSolution(traj)
        input_trajectory = mp.GetSolution(inp_traj)
        
        #print('time=', time_array)
        #print('input=', input_trajectory)
        #print('traj=', trajectory)
        true_trajectory = self.simulate_states_over_time(state_initial, time_array, input_trajectory)
        #print(trajectory - true_trajectory)
        #print(trajectory)
        #print(true_trajectory)
        return trajectory, input_trajectory, time_array
    
import math

from pydrake.all import (Jacobian, MathematicalProgram, SolutionResult,
                         Variables)


def dynamics(x):
    return -x + x**3


prog = MathematicalProgram()
x = prog.NewIndeterminates(1, "x")
rho = prog.NewContinuousVariables(1, "rho")[0]

# Define the Lyapunov function.
V = x.dot(x)
Vdot = Jacobian([V], x).dot(dynamics(x))[0]

# Define the Lagrange multipliers.
(lambda_, constraint) = prog.NewSosPolynomial(Variables(x), 4)

prog.AddSosConstraint((V-rho) * x.dot(x) - lambda_.ToExpression() * Vdot)
prog.AddLinearCost(-rho)

result = prog.Solve()

assert(result == SolutionResult.kSolutionFound)

print("Verified that " + str(V) + " < " + str(prog.GetSolution(rho)) +
      " is in the region of attraction.")

assert(math.fabs(prog.GetSolution(rho) - 1) < 1e-5)
Example #21
0
def mixed_integer_quadratic_program(nc, H, f, A, b, C=None, d=None, **kwargs):
    """
    Solves the strictly convex (H > 0) mixed-integer quadratic program min .5 x' H x + f' x s.t. A x <= b, C x = d.
    The first nc variables in x are continuous, the remaining are binaries.

    Arguments
    ----------
    nc : int
        Number of continuous variables in the problem.
    H : numpy.ndarray
        Positive definite Hessian of the cost function.
    f : numpy.ndarray
        Gradient of the cost function.
    A : numpy.ndarray
        Left-hand side of the inequality constraints.
    b : numpy.ndarray
        Right-hand side of the inequality constraints.
    C : numpy.ndarray
        Left-hand side of the equality constraints.
    d : numpy.ndarray
        Right-hand side of the equality constraints.

    Returns
    ----------
    sol : dict
        Dictionary with the solution of the MIQP.

        Fields
        ----------
        min : float
            Minimum of the MIQP (None if the problem is infeasible).
        argmin : numpy.ndarray
            Argument that minimizes the MIQP (None if the problem is infeasible).
    """

    # check equalities
    if (C is None) != (d is None):
        raise ValueError('missing C or d.')

    # problem size
    n_ineq, n_x = A.shape
    if C is not None:
        n_eq = C.shape[0]
    else:
        n_eq = 0

    # build program
    prog = MathematicalProgram()
    x = np.hstack((
        prog.NewContinuousVariables(nc),
        prog.NewBinaryVariables(n_x - nc)
        ))
    for i in range(n_ineq):
        prog.AddLinearConstraint(A[i].dot(x) <= b[i])
    for i in range(n_eq):
        prog.AddLinearConstraint(C[i].dot(x) == d[i])
    prog.AddQuadraticCost(.5*x.dot(H).dot(x) + f.dot(x))

    # solve
    solver = GurobiSolver()
    prog.SetSolverOption(solver.solver_type(), 'OutputFlag', 0)
    for parameter, value in kwargs.items():
        prog.SetSolverOption(solver.solver_type(), parameter, value)
    result = prog.Solve()

    # initialize output
    sol = {
        'min': None,
        'argmin': None
    }

    if result == SolutionResult.kSolutionFound:
        sol['argmin'] = prog.GetSolution(x)
        sol['min'] = .5*sol['argmin'].dot(H).dot(sol['argmin']) + f.dot(sol['argmin'])

    return sol
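A minimal usage sketch for the helper above, with hypothetical data; it assumes Drake,
numpy, and the Gurobi solver required by the implementation are available:

import numpy as np

H = np.eye(2)                            # strictly convex Hessian
f = np.array([-1.0, -1.0])
A = np.vstack((np.eye(2), -np.eye(2)))   # box constraints 0 <= x <= (2, 1)
b = np.array([2.0, 1.0, 0.0, 0.0])

# One continuous variable and one binary variable (n_x - nc = 1).
sol = mixed_integer_quadratic_program(nc=1, H=H, f=f, A=A, b=b)
print(sol['min'], sol['argmin'])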
Example #22
0
def test_Feedback_Linearization_controller_BS(x, t):
    # Output feedback linearization 2
    #
    # y1= ||r-rf||^2/2
    # y2= wx
    # y3= wy
    # y4= wz
    #
    # Analysis: The zero dynamics is unstable.
    global g, xf, Aout, Bout, Mout, T_period, w_freq, radius

    print("%%%%%%%%%%%%%%%%%%%%%")
    print("%%CLF-QP  %%%%%%%%%%%")
    print("%%%%%%%%%%%%%%%%%%%%%")

    print("t:", t)
    prog = MathematicalProgram()
    u_var = prog.NewContinuousVariables(4, "u_var")
    c_var = prog.NewContinuousVariables(1, "c_var")

    # # # Example 1 : Circle

    x_f = radius * math.cos(w_freq * t)
    y_f = radius * math.sin(w_freq * t)
    # print("x_f:",x_f)
    # print("y_f:",y_f)
    dx_f = -radius * math.pow(w_freq, 1) * math.sin(w_freq * t)
    dy_f = radius * math.pow(w_freq, 1) * math.cos(w_freq * t)
    ddx_f = -radius * math.pow(w_freq, 2) * math.cos(w_freq * t)
    ddy_f = -radius * math.pow(w_freq, 2) * math.sin(w_freq * t)
    dddx_f = radius * math.pow(w_freq, 3) * math.sin(w_freq * t)
    dddy_f = -radius * math.pow(w_freq, 3) * math.cos(w_freq * t)
    ddddx_f = radius * math.pow(w_freq, 4) * math.cos(w_freq * t)
    ddddy_f = radius * math.pow(w_freq, 4) * math.sin(w_freq * t)

    # # Example 2 : Lissajous curve a=1 b=2
    # ratio_ab=2;
    # a=1;
    # b=ratio_ab*a;
    # delta_lissajous = 0 #math.pi/2;

    # x_f = radius*math.sin(a*w_freq*t+delta_lissajous)
    # y_f = radius*math.sin(b*w_freq*t)
    # # print("x_f:",x_f)
    # # print("y_f:",y_f)
    # dx_f = radius*math.pow(a*w_freq,1)*math.cos(a*w_freq*t+delta_lissajous)
    # dy_f = radius*math.pow(b*w_freq,1)*math.cos(b*w_freq*t)
    # ddx_f = -radius*math.pow(a*w_freq,2)*math.sin(a*w_freq*t+delta_lissajous)
    # ddy_f = -radius*math.pow(b*w_freq,2)*math.sin(b*w_freq*t)
    # dddx_f = -radius*math.pow(a*w_freq,3)*math.cos(a*w_freq*t+delta_lissajous)
    # dddy_f = -radius*math.pow(b*w_freq,3)*math.cos(b*w_freq*t)
    # ddddx_f = radius*math.pow(a*w_freq,4)*math.sin(a*w_freq*t+delta_lissajous)
    # ddddy_f = radius*math.pow(b*w_freq,4)*math.sin(b*w_freq*t)

    e1 = np.array([1, 0, 0])  # e3 elementary vector
    e2 = np.array([0, 1, 0])  # e3 elementary vector
    e3 = np.array([0, 0, 1])  # e3 elementary vector

    # epsilonn= 1e-0
    # alpha = 100;
    # kp1234=alpha*1/math.pow(epsilonn,4) # gain for y
    # kd1=4/math.pow(epsilonn,3) # gain for y^(1)
    # kd2=12/math.pow(epsilonn,2) # gain for y^(2)
    # kd3=4/math.pow(epsilonn,1)  # gain for y^(3)

    # kp5= 10;                    # gain for y5

    q = x[0:8]    # positions: [x, y, z, q0, q1, q2, q3, xi1]
    qd = x[8:15]  # velocities: [vx, vy, vz, wx, wy, wz, xi2]

    print("qnorm:", np.dot(q[3:7], q[3:7]))

    q0 = q[3]
    q1 = q[4]
    q2 = q[5]
    q3 = q[6]
    xi1 = q[7]

    v = qd[0:3]
    w = qd[3:6]
    xi2 = qd[6]

    xd = xf[0]
    yd = xf[1]
    zd = xf[2]
    wd = xf[11:14]

    # Useful vectors and matrices

    (Rq, Eq, wIw, I_inv) = robobee_plantBS.GetManipulatorDynamics(q, qd)

    F1q = np.zeros((3, 4))
    F1q[0, :] = np.array([q2, q3, q0, q1])
    F1q[1, :] = np.array([-1 * q1, -1 * q0, q3, q2])
    F1q[2, :] = np.array([q0, -1 * q1, -1 * q2, q3])

    Rqe3 = np.dot(Rq, e3)
    Rqe3_hat = np.zeros((3, 3))
    Rqe3_hat[0, :] = np.array([0, -Rqe3[2], Rqe3[1]])
    Rqe3_hat[1, :] = np.array([Rqe3[2], 0, -Rqe3[0]])
    Rqe3_hat[2, :] = np.array([-Rqe3[1], Rqe3[0], 0])

    Rqe1 = np.dot(Rq, e1)
    Rqe1_hat = np.zeros((3, 3))
    Rqe1_hat[0, :] = np.array([0, -Rqe1[2], Rqe1[1]])
    Rqe1_hat[1, :] = np.array([Rqe1[2], 0, -Rqe1[0]])
    Rqe1_hat[2, :] = np.array([-Rqe1[1], Rqe1[0], 0])

    Rqe1_x = np.dot(e2.T, Rqe1)
    Rqe1_y = np.dot(e1.T, Rqe1)

    w_hat = np.zeros((3, 3))
    w_hat[0, :] = np.array([0, -w[2], w[1]])
    w_hat[1, :] = np.array([w[2], 0, -w[0]])
    w_hat[2, :] = np.array([-w[1], w[0], 0])

    Rw = np.dot(Rq, w)
    Rw_hat = np.zeros((3, 3))
    Rw_hat[0, :] = np.array([0, -Rw[2], Rw[1]])
    Rw_hat[1, :] = np.array([Rw[2], 0, -Rw[0]])
    Rw_hat[2, :] = np.array([-Rw[1], Rw[0], 0])
    #- Checking the derivation

    # print("F1qEqT-(-Rqe3_hat)",np.dot(F1q,Eq.T)-(-Rqe3_hat))
    # Rqe3_cal = np.zeros(3)
    # Rqe3_cal[0] = 2*(q3*q1+q0*q2)
    # Rqe3_cal[1] = 2*(q3*q2-q0*q1)
    # Rqe3_cal[2] = (q0*q0+q3*q3-q1*q1-q2*q2)

    # print("Rqe3 - Rqe3_cal", Rqe3-Rqe3_cal)

    # Four output
    y1 = x[0] - x_f
    y2 = x[1] - y_f
    y3 = x[2] - zd - 0
    y4 = math.atan2(Rqe1_x, Rqe1_y) - math.pi / 8
    # print("Rqe1_x:", Rqe1_x)

    eta1 = np.zeros(3)
    eta1 = np.array([y1, y2, y3])
    eta5 = y4

    # print("y4", y4)

    # First derivative of first three output and yaw output
    eta2 = np.zeros(3)
    eta2 = v - np.array([dx_f, dy_f, 0])
    dy1 = eta2[0]
    dy2 = eta2[1]
    dy3 = eta2[2]

    x2y2 = (math.pow(Rqe1_x, 2) + math.pow(Rqe1_y, 2))  # x^2+y^2

    eta6_temp = np.zeros(3)  #eta6_temp = (ye2T-xe1T)/(x^2+y^2)
    eta6_temp = (Rqe1_y * e2.T - Rqe1_x * e1.T) / x2y2
    # print("eta6_temp:", eta6_temp)
    # Body frame w  ( multiply R)
    eta6 = np.dot(eta6_temp, np.dot(-Rqe1_hat, np.dot(Rq, w)))

    # World frame w
    # eta6 = np.dot(eta6_temp,np.dot(-Rqe1_hat,w))
    # print("Rqe1_hat:", Rqe1_hat)

    # Second derivative of first three output
    eta3 = np.zeros(3)
    eta3 = -g * e3 + Rqe3 * xi1 - np.array([ddx_f, ddy_f, 0])
    ddy1 = eta3[0]
    ddy2 = eta3[1]
    ddy3 = eta3[2]

    # Third derivative of first three output
    eta4 = np.zeros(3)
    # Body frame w ( multiply R)
    eta4 = Rqe3 * xi2 + np.dot(-Rqe3_hat, np.dot(Rq, w)) * xi1 - np.array(
        [dddx_f, dddy_f, 0])

    # World frame w
    # eta4 = Rqe3*xi2+np.dot(np.dot(F1q,Eq.T),w)*xi1 - np.array([dddx_f,dddy_f,0])
    dddy1 = eta4[0]
    dddy2 = eta4[1]
    dddy3 = eta4[2]

    # Fourth derivative of first three output
    B_qw_temp = np.zeros(3)
    # Body frame w
    B_qw_temp = xi1 * (-np.dot(Rw_hat, np.dot(Rqe3_hat, Rw)) +
                       np.dot(Rqe3_hat, np.dot(Rq, np.dot(I_inv, wIw)))
                       )  # np.dot(I_inv,wIw)*xi1-2*w*xi2
    B_qw = B_qw_temp + xi2 * (-2 * np.dot(Rqe3_hat, Rw)) - np.array(
        [ddddx_f, ddddy_f, 0])  #np.dot(Rqe3_hat,B_qw_temp)

    # World frame w
    # B_qw_temp = xi1*(-np.dot(w_hat,np.dot(Rqe3_hat,w))+np.dot(Rqe3_hat,np.dot(I_inv,wIw))) # np.dot(I_inv,wIw)*xi1-2*w*xi2
    # B_qw      = B_qw_temp+xi2*(-2*np.dot(Rqe3_hat,w)) - np.array([ddddx_f,ddddy_f,0])   #np.dot(Rqe3_hat,B_qw_temp)

    # B_qw = B_qw_temp - np.dot(w_hat,np.dot(Rqe3_hat,w))*xi1

    # Second derivative of yaw output

    # Body frame w
    dRqe1_x = np.dot(e2, np.dot(-Rqe1_hat, Rw))  # \dot{x}
    dRqe1_y = np.dot(e1, np.dot(-Rqe1_hat, Rw))  # \dot{y}
    alpha1 = 2 * (Rqe1_x * dRqe1_x +
                  Rqe1_y * dRqe1_y) / x2y2  # (2xdx +2ydy)/(x^2+y^2)

    # World frame w
    # dRqe1_x = np.dot(e2,np.dot(-Rqe1_hat,w)) # \dot{x}
    # dRqe1_y = np.dot(e1,np.dot(-Rqe1_hat,w)) # \dot{y}

    # alpha1 = 2*(Rqe1_x*dRqe1_x+Rqe1_y*dRqe1_y)/x2y2 # (2xdx +2ydy)/(x^2+y^2)
    # # alpha2 = math.pow(dRqe1_y,2)-math.pow(dRqe1_x,2)

    # Body frame w

    B_yaw_temp3 = np.zeros(3)
    B_yaw_temp3 = alpha1 * np.dot(Rqe1_hat, Rw) + np.dot(
        Rqe1_hat, np.dot(Rq, np.dot(I_inv, wIw))) - np.dot(
            Rw_hat, np.dot(Rqe1_hat, Rw))

    B_yaw = np.dot(eta6_temp,
                   B_yaw_temp3)  # +alpha2 :Could be an error in math.
    g_yaw = np.zeros(3)
    g_yaw = -np.dot(eta6_temp, np.dot(Rqe1_hat, np.dot(Rq, I_inv)))

    # World frame w
    # B_yaw_temp3 =np.zeros(3)
    # B_yaw_temp3 = alpha1*np.dot(Rqe1_hat,w)+np.dot(Rqe1_hat,np.dot(I_inv,wIw))-np.dot(w_hat,np.dot(Rqe1_hat,w))

    # B_yaw = np.dot(eta6_temp,B_yaw_temp3) # +alpha2 :Could be an error in math.
    # g_yaw = np.zeros(3)
    # g_yaw = -np.dot(eta6_temp,np.dot(Rqe1_hat,I_inv))

    # print("g_yaw:", g_yaw)
    # Decoupling matrix A(x)\in\mathbb{R}^4

    A_fl = np.zeros((4, 4))
    A_fl[0:3, 0] = Rqe3
    # Body frame w
    A_fl[0:3, 1:4] = -np.dot(Rqe3_hat, np.dot(Rq, I_inv)) * xi1
    # World frame w
    # A_fl[0:3,1:4] = -np.dot(Rqe3_hat,I_inv)*xi1
    A_fl[3, 1:4] = g_yaw

    A_fl_inv = np.linalg.inv(A_fl)
    A_fl_det = np.linalg.det(A_fl)
    # print("I_inv:", I_inv)
    print("A_fl:", A_fl)
    print("A_fl_det:", A_fl_det)

    # Drift in the output dynamics

    U_temp = np.zeros(4)
    U_temp[0:3] = B_qw
    U_temp[3] = B_yaw

    # Output dynamics

    eta = np.hstack([eta1, eta2, eta3, eta5, eta4, eta6])
    eta_norm = np.dot(eta, eta)

    # v-CLF QP controller

    FP_PF = np.dot(Aout.T, Mout) + np.dot(Mout, Aout)
    PG = np.dot(Mout, Bout)

    L_FVx = np.dot(eta, np.dot(FP_PF, eta))
    L_GVx = 2 * np.dot(eta.T, PG)  # row vector
    L_fhx_star = U_temp

    Vx = np.dot(eta, np.dot(Mout, eta))
    # phi0_exp = L_FVx+np.dot(L_GVx,L_fhx_star)+(min_e_Q/max_e_P)*Vx*1    # exponentially stabilizing
    # phi0_exp = L_FVx+np.dot(L_GVx,L_fhx_star)+min_e_Q*eta_norm      # more exact bound - exponentially stabilizing
    phi0_exp = L_FVx + np.dot(
        L_GVx, L_fhx_star)  # more exact bound - exponentially stabilizing

    phi1_decouple = np.dot(L_GVx, A_fl)

    # # Solve QP
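    # QP decision variables are the input u_var and c_var, which adds a
    # c * ||eta||^2 margin to the CLF constraint (imposed below as
    # phi0 + phi1 <= 0) and is rewarded in the cost; the quadratic cost is
    # ||A_fl * u + L_fhx_star||^2 expanded with its constant term dropped.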
    v_var = np.dot(A_fl, u_var) + L_fhx_star
    Quadratic_Positive_def = np.matmul(A_fl.T, A_fl)
    QP_det = np.linalg.det(Quadratic_Positive_def)
    c_QP = 2 * np.dot(L_fhx_star.T, A_fl)

    c_QP_extned = np.hstack([c_QP, -1])
    u_var_extended = np.hstack([u_var, c_var])

    # CLF_QP_cost_v = np.dot(v_var,v_var) // Exact quadratic cost
    CLF_QP_cost_v_effective = np.dot(
        u_var, np.dot(Quadratic_Positive_def, u_var)) + np.dot(
            c_QP, u_var) - c_var[0]  # Quadratic cost without constant term

    # CLF_QP_cost_u = np.dot(u_var,u_var)

    phi1 = np.dot(phi1_decouple, u_var) + c_var[0] * eta_norm

    #----Printing intermediate states

    # print("L_fhx_star: ",L_fhx_star)
    # print("c_QP:", c_QP)
    # print("Qp : ",Quadratic_Positive_def)
    # print("Qp det: ", QP_det)
    # print("c_QP", c_QP)

    # print("phi0_exp: ", phi0_exp)
    # print("PG:", PG)
    # print("L_GVx:", L_GVx)
    # print("eta6", eta6)
    # print("d : ", phi1_decouple)
    # print("Cost expression:", CLF_QP_cost_v)
    #print("Const expression:", phi0_exp+phi1)

    #----Different solver option // Gurobi did not work with python at this point (some binding issue 8/8/2018)
    solver = IpoptSolver()
    # solver = GurobiSolver()
    # print solver.available()
    # assert(solver.available()==True)
    # assertEqual(solver.solver_type(), mp.SolverType.kGurobi)
    # solver.Solve(prog)
    # assertEqual(result, mp.SolutionResult.kSolutionFound)

    # mp.AddLinearConstraint()
    # print("x:", x)
    # print("phi_0_exp:", phi0_exp)
    # print("phi1_decouple:", phi1_decouple)

    # print("eta1:", eta1)
    # print("eta2:", eta2)
    # print("eta3:", eta3)
    # print("eta4:", eta4)
    # print("eta5:", eta5)
    # print("eta6:", eta6)

    # Output Feedback Linearization controller
    mu = np.zeros(4)
    k = np.zeros((4, 14))
    k = np.matmul(Bout.T, Mout)
    k = np.matmul(np.linalg.inv(R), k)

    mu = -1 / 1 * np.matmul(k, eta)

    v = -U_temp + mu

    U_fl = np.matmul(
        A_fl_inv, v
    )  # Output feedback-linearization controller, used to compare with the CLF-QP solution

    # Set up the QP problem
    prog.AddQuadraticCost(CLF_QP_cost_v_effective)
    prog.AddConstraint(phi0_exp + phi1 <= 0)
    prog.AddConstraint(c_var[0] >= 0)
    prog.AddConstraint(c_var[0] <= 100)
    prog.AddConstraint(u_var[0] <= 30)
    prog.SetSolverOption(mp.SolverType.kIpopt, "print_level",
                         5)  # CAUTION: Assuming that solver used Ipopt

    print("CLF value:", Vx)  # Current CLF value

    prog.SetInitialGuess(u_var, U_fl)
    prog.Solve()  # Solve with default osqp

    # solver.Solve(prog)
    print("Optimal u : ", prog.GetSolution(u_var))
    U_CLF_QP = prog.GetSolution(u_var)
    C_CLF_QP = prog.GetSolution(c_var)

    #---- Printing for debugging
    # dx = robobee_plantBS.evaluate_f(U_fl,x)
    # print("dx", dx)
    # print("\n######################")
    # # print("qe3:", A_fl[0,0])
    # print("u:", u)
    # print("\n####################33")

    # deta4 = B_qw+Rqe3*U_fl_zero[0]+np.matmul(-np.matmul(Rqe3_hat,I_inv),U_fl_zero[1:4])*xi1
    # deta6 = B_yaw+np.dot(g_yaw,U_fl_zero[1:4])
    # print("deta4:",deta4)
    # print("deta6:",deta6)
    print(C_CLF_QP)

    phi1_opt = np.dot(phi1_decouple, U_CLF_QP) + C_CLF_QP * eta_norm
    phi1_opt_FL = np.dot(phi1_decouple, U_fl)

    print("FL u: ", U_fl)
    print("CLF u:", U_CLF_QP)
    print("Cost FL: ", np.dot(mu, mu))

    v_CLF = np.dot(A_fl, U_CLF_QP) + L_fhx_star
    print("Cost CLF: ", np.dot(v_CLF, v_CLF))
    print("Constraint FL : ", phi0_exp + phi1_opt_FL)
    print("Constraint CLF : ", phi0_exp + phi1_opt)
    u = U_CLF_QP

    print("eigenvalues minQ maxP:", [min_e_Q, max_e_P])

    return u
Example #23
0
def achieves_force_closure(points, normals, mu):
    """
    Determines whether the given forces achieve force closure.

    Note that each of points and normals are lists of variable 
    length, but should be the same length.

    Here's an example valid set of input args:
        points  = [np.asarray([0.1, 0.2]), np.asarray([0.5,-0.1])]
        normals = [np.asarray([-1.0,0.0]), np.asarray([0.0, 1.0])]
        mu = 0.2
        achieves_force_closure(points, normals, mu)

    NOTE: your normals should be normalized (be unit vectors)

    :param points: a list of arrays where each array is the
        position of the force points relative to the center of mass.
    :type points: a list of 1-d numpy arrays of shape (2,)

    :param normals: a list of arrays where each array is the normal
        of the local surface of the object, pointing in towards
        the object
    :type normals: a list of 1-d numpy arrays of shape (2,)

    :param mu: Coulomb static friction coefficient
    :type mu: float, greater than or equal to 0

    :return: whether or not the given parameters achieves force closure
    :rtype: bool (True or False)
    """
    assert len(points) == len(normals)
    assert mu >= 0.0

    G = get_G(points, normals)
    if np.linalg.matrix_rank(G) != 3:
        return False

    from pydrake.all import MathematicalProgram
    from pydrake.solvers import mathematicalprogram as mpa

    mp = MathematicalProgram()

    f = mp.NewContinuousVariables(2 * len(points), "f")
    gamma = mp.NewContinuousVariables(1, "gamma")

    mp.AddLinearConstraint(gamma[0] <= 0)
    mp.AddLinearConstraint(gamma[0] >= -1)

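    # Friction-cone constraints |f_t| <= mu * f_n shifted by gamma, plus crude
    # force bounds; force closure requires a strictly feasible set of forces,
    # i.e. gamma < 0 at the optimum.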
    for i in range(len(points)):
        mp.AddLinearConstraint(f[2 * i] <= mu * f[2 * i + 1] + gamma[0])
        mp.AddLinearConstraint(-f[2 * i] <= mu * f[2 * i + 1] + gamma[0])
        mp.AddLinearConstraint(f[2 * i] <= 1000)
        mp.AddLinearConstraint(f[2 * i] >= -1000)
        mp.AddLinearConstraint(f[2 * i + 1] + gamma[0] >= 0)
        mp.AddLinearConstraint(f[2 * i + 1] <= 1000)

    gf = G.dot(f)
    #print(gf.shape)
    assert (gf.shape == (3, ))
    for i in range(3):
        mp.AddLinearConstraint(gf[i] == 0)

    mp.AddLinearCost(gamma[0])
    #mp.AddQuadraticCost(np.sum(f**2))

    #print(mp)
    x = mp.Solve()
    #print(mp.Solve())
    if x == mpa.SolutionResult.kSolutionFound:
        #print(mp.GetSolution(f))
        g = mp.GetSolution(gamma)
        #print(g)
        #print(g[0] < 0)
        return g[0] < 0
    return False
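Usage sketch, reusing the sample contact data from the docstring above; it assumes the
get_G helper referenced inside the function is defined in the same module:

import numpy as np

points = [np.asarray([0.1, 0.2]), np.asarray([0.5, -0.1])]
normals = [np.asarray([-1.0, 0.0]), np.asarray([0.0, 1.0])]
print(achieves_force_closure(points, normals, mu=0.2))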