Example #1
    def __discretize_friction(self, normal, tangent):
        """
        Rotates the terrain tangent vectors to discretize the friction cone
        
        Arguments:
            normal:  The terrain normal direction, (1x3) numpy array
            tangent:  The terrain tangent directions, (2x3) numpy array
        Return Values:
            all_tangents: The discretized friction vectors, (2nx3) numpy array

        This method is now deprecated
        """
        # Reflect the current friction basis
        tangent = np.concatenate((tangent, -tangent), axis=0)
        all_tangents = np.zeros((4 * (self._dlevel), tangent.shape[1]))
        all_tangents[0:4, :] = tangent
        # Rotate the tangent basis around the normal vector
        for n in range(1, self._dlevel):
            # Create an angle-axis representation of rotation
            R = RotationMatrix(theta_lambda=AngleAxis(
                angle=n * pi / (2 * (self._dlevel)), axis=normal))
            # Apply the rotation matrix
            all_tangents[n * 4:(n + 1) * 4, :] = R.multiply(
                tangent.transpose()).transpose()
        return all_tangents
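# A minimal standalone sketch of the same friction-cone discretization for a
# flat terrain (normal = +z) with an assumed discretization level of 2; the
# names below are illustrative and not part of the original class.
import numpy as np
from pydrake.common.eigen_geometry import AngleAxis
from pydrake.math import RotationMatrix

normal = np.array([0., 0., 1.])
tangent = np.array([[1., 0., 0.], [0., 1., 0.]])
dlevel = 2
tangent = np.concatenate((tangent, -tangent), axis=0)
all_tangents = np.zeros((4 * dlevel, 3))
all_tangents[0:4, :] = tangent
for n in range(1, dlevel):
    R = RotationMatrix(AngleAxis(angle=n * np.pi / (2 * dlevel), axis=normal))
    all_tangents[n * 4:(n + 1) * 4, :] = R.multiply(tangent.T).T
# all_tangents now holds 8 unit vectors evenly spanning the friction cone.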
Example #2
def solve_inverse_kinematics(mbp, target_frame, target_pose,
        max_position_error=0.005, theta_bound=0.01*np.pi, initial_guess=None):
    if initial_guess is None:
        # TODO: provide initial guess for some joints (like iiwa joint0)
        initial_guess = np.zeros(mbp.num_positions())
        for joint in prune_fixed_joints(get_joints(mbp)):
            lower, upper = get_joint_limits(joint)
            if -np.inf < lower < upper < np.inf:
                initial_guess[joint.position_start()] = random.uniform(lower, upper)
    assert mbp.num_positions() == len(initial_guess)

    ik_scene = InverseKinematics(mbp)
    world_frame = mbp.world_frame()

    ik_scene.AddOrientationConstraint(
        frameAbar=target_frame, R_AbarA=RotationMatrix.Identity(),
        frameBbar=world_frame, R_BbarB=RotationMatrix(target_pose.rotation()),
        theta_bound=theta_bound)

    lower = target_pose.translation() - max_position_error
    upper = target_pose.translation() + max_position_error
    ik_scene.AddPositionConstraint(
        frameB=target_frame, p_BQ=np.zeros(3),
        frameA=world_frame, p_AQ_lower=lower, p_AQ_upper=upper)
    # AddAngleBetweenVectorsConstraint
    # AddGazeTargetConstraint

    prog = ik_scene.prog()
    prog.SetInitialGuess(ik_scene.q(), initial_guess)
    result = Solve(prog)  # pydrake.solvers.Solve
    if not result.is_success():
        return None
    return result.GetSolution(ik_scene.q())
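# Hedged usage sketch: `plant`, the frame name, and the target pose below are
# illustrative placeholders (assumes the same imports as the function above).
q_sol = solve_inverse_kinematics(
    plant, plant.GetFrameByName("iiwa_link_7"),
    RigidTransform(RotationMatrix.MakeXRotation(np.pi), [0.5, 0.0, 0.4]))
if q_sol is not None:
    print("IK solution:", q_sol)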
Example #3
def AddPlanarBinAndSimpleBox(plant,
                             mass=1.0,
                             mu=1.0,
                             width=0.2,
                             depth=0.05,
                             height=0.3):
    parser = Parser(plant)
    bin = parser.AddModelFromFile(FindResource("models/planar_bin.sdf"))
    plant.WeldFrames(
        plant.world_frame(), plant.GetFrameByName("bin_base", bin),
        RigidTransform(RotationMatrix.MakeZRotation(np.pi / 2.0),
                       [0, 0, -0.015]))

    planar_joint_frame = plant.AddFrame(
        FixedOffsetFrame(
            "planar_joint_frame", plant.world_frame(),
            RigidTransform(RotationMatrix.MakeXRotation(np.pi / 2))))

    # TODO(russt): make this a *random* box?
    # TODO(russt): move random box to a shared py file.
    box_instance = AddShape(plant, Box(width, depth, height), "box", mass, mu)
    box_joint = plant.AddJoint(
        PlanarJoint("box", planar_joint_frame,
                    plant.GetFrameByName("box", box_instance)))
    box_joint.set_position_limits([-.5, -.1, -np.pi], [.5, .3, np.pi])
    box_joint.set_velocity_limits([-2, -2, -2], [2, 2, 2])
    box_joint.set_default_translation([0, depth / 2.0])
    return box_instance
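# Hedged usage sketch (assumes the course-specific helpers used above, e.g.
# FindResource and AddShape, plus the planar_bin.sdf model, are available):
builder = DiagramBuilder()
plant, scene_graph = AddMultibodyPlantSceneGraph(builder, time_step=1e-3)
box_instance = AddPlanarBinAndSimpleBox(plant, mass=0.5, mu=0.8)
plant.Finalize()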
Example #4
def AddTriad(source_id,
             frame_id,
             scene_graph,
             length=.25,
             radius=0.01,
             opacity=1.,
             X_FT=RigidTransform(),
             name="frame"):
    """
    Adds illustration geometry representing the coordinate frame, with the
    x-axis drawn in red, the y-axis in green and the z-axis in blue. The axes
    point in +x, +y and +z directions, respectively.

    Args:
      source_id: The source registered with SceneGraph.
      frame_id: A geometry::frame_id registered with scene_graph.
      scene_graph: The SceneGraph with which we will register the geometry.
      length: the length of each axis in meters.
      radius: the radius of each axis in meters.
      opacity: the opacity of the coordinate axes, between 0 and 1.
      X_FT: a RigidTransform from the triad frame T to the frame_id frame F
      name: the added geometry will have names name + " x-axis", etc.
    """
    # x-axis
    X_TG = RigidTransform(RotationMatrix.MakeYRotation(np.pi / 2),
                          [length / 2., 0, 0])
    geom = GeometryInstance(X_FT.multiply(X_TG), Cylinder(radius, length),
                            name + " x-axis")
    geom.set_illustration_properties(
        MakePhongIllustrationProperties([1, 0, 0, opacity]))
    scene_graph.RegisterGeometry(source_id, frame_id, geom)

    # y-axis
    X_TG = RigidTransform(RotationMatrix.MakeXRotation(np.pi / 2),
                          [0, length / 2., 0])
    geom = GeometryInstance(X_FT.multiply(X_TG), Cylinder(radius, length),
                            name + " y-axis")
    geom.set_illustration_properties(
        MakePhongIllustrationProperties([0, 1, 0, opacity]))
    scene_graph.RegisterGeometry(source_id, frame_id, geom)

    # z-axis
    X_TG = RigidTransform([0, 0, length / 2.])
    geom = GeometryInstance(X_FT.multiply(X_TG), Cylinder(radius, length),
                            name + " z-axis")
    geom.set_illustration_properties(
        MakePhongIllustrationProperties([0, 0, 1, opacity]))
    scene_graph.RegisterGeometry(source_id, frame_id, geom)
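# Hedged usage sketch: draw a triad at an illustrative body frame, assuming
# `plant` and `scene_graph` came from AddMultibodyPlantSceneGraph and a body
# named "base_link" exists.
body = plant.GetBodyByName("base_link")
AddTriad(plant.get_source_id(),
         plant.GetBodyFrameIdOrThrow(body.index()),
         scene_graph,
         length=0.1,
         radius=0.005,
         name="base_link frame")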
Example #5
 def _sample_child(parent_tf, child_info):
     # Given a ChildInfo struct, produce a randomly sampled child.
     # Sample child xyz and rotation
     xyz_offset = np.random.uniform(
         low=child_info.child_xyz_bounds.xyz_min,
         high=child_info.child_xyz_bounds.xyz_max
     )
     if child_info.child_rotation_bounds is None:
         # No constraints on child rotation; randomly
         # sample a rotation instead.
         R_offset = UniformlyRandomRotationMatrix(RandomGenerator(np.random.randint(2**31)))
     else:
         axis = child_info.child_rotation_bounds.axis
         angle = np.random.uniform(
             low=child_info.child_rotation_bounds.min_angle,
             high=child_info.child_rotation_bounds.max_angle
         )
         R_offset = RotationMatrix(AngleAxis(angle=angle, axis=axis))
     # Child XYZ is in *world* frame, with a translational offset.
     #tf_offset = RigidTransform(p=parent_tf.rotation().inverse().multiply(xyz_offset), R=R_offset)
     #tf = parent_tf.multiply(tf_offset)
     tf = RigidTransform(
         p = parent_tf.translation() + xyz_offset,
         R = parent_tf.rotation().multiply(R_offset)
     )
     return child_info.child_type(tf=tf)
Example #6
def get_box_from_geom(scene_graph, visual_only=True):
    # TODO: GeometryInstance, InternalGeometry, & GeometryContext to get the shape of objects
    # TODO: get_contact_results_output_port
    # https://github.com/RussTedrake/underactuated/blob/master/src/underactuated/meshcat_visualizer.py
    # https://github.com/RobotLocomotion/drake/blob/master/lcmtypes/lcmt_viewer_draw.lcm
    mock_lcm = DrakeMockLcm()
    DispatchLoadMessage(scene_graph, mock_lcm)
    load_robot_msg = lcmt_viewer_load_robot.decode(
        mock_lcm.get_last_published_message("DRAKE_VIEWER_LOAD_ROBOT"))
    # 'link', 'num_links'
    #builder.Connect(scene_graph.get_pose_bundle_output_port(),
    #                viz.get_input_port(0))

    # TODO: hash bodies instead
    box_from_geom = {}
    for body_index in range(load_robot_msg.num_links):
        # 'geom', 'name', 'num_geom', 'robot_num'
        link = load_robot_msg.link[body_index]
        [source_name, frame_name] = link.name.split("::")
        # source_name = 'Source_1'
        model_index = link.robot_num

        visual_index = 0
        for geom in sorted(link.geom, key=lambda g: g.position[::-1]): # sort by z, y, x
            # 'color', 'float_data', 'num_float_data', 'position', 'quaternion', 'string_data', 'type'
            # string_data is empty...
            if visual_only and (geom.color[3] == 0):
                continue

            # TODO: sort by lowest point on the bounding box?
            # TODO: maybe just return the set of bodies in order and let the user decide what to do with them
            visual_index += 1 # TODO: affected by transparent visual
            if geom.type == geom.BOX:
                assert geom.num_float_data == 3
                [width, length, height] = geom.float_data
                extent = np.array([width, length, height]) / 2.
            elif geom.type == geom.SPHERE:
                assert geom.num_float_data == 1
                [radius] = geom.float_data
                extent = np.array([radius, radius, radius])
            elif geom.type == geom.CYLINDER:
                assert geom.num_float_data == 2
                [radius, height] = geom.float_data
                extent = np.array([radius, radius, height/2.])
                #meshcat_geom = meshcat.geometry.Cylinder(
                #    geom.float_data[1], geom.float_data[0])
                # In Drake, cylinders are along +z
            #elif geom.type == geom.MESH:
            #    meshcat_geom = meshcat.geometry.ObjMeshGeometry.from_file(
            #            geom.string_data[0:-3] + "obj")
            else:
                #print("Robot {}, link {}, geometry {}: UNSUPPORTED GEOMETRY TYPE {} WAS IGNORED".format(
                #    model_index, frame_name, visual_index-1, geom.type))
                continue
            link_from_box = RigidTransform(
                RotationMatrix(Quaternion(geom.quaternion)), geom.position).GetAsIsometry3() #.GetAsMatrix4()
            box_from_geom[model_index, frame_name, visual_index-1] = \
                (BoundingBox(np.zeros(3), extent), link_from_box, get_geom_name(geom))
    return box_from_geom
Example #7
def make_gripper_frames(X_G, X_O):
    """
    Takes a partial specification with X_G["initial"], X_O["initial"], and X_O["goal"], and
    returns a X_G and times with all of the pick and place frames populated.
    """
    # Define (again) the gripper pose relative to the object when in grasp.
    p_GgraspO = [0, 0.12, 0]
    R_GgraspO = RotationMatrix.MakeXRotation(np.pi / 2.0).multiply(
        RotationMatrix.MakeZRotation(np.pi / 2.0))
    X_GgraspO = RigidTransform(R_GgraspO, p_GgraspO)
    X_OGgrasp = X_GgraspO.inverse()
    # pregrasp is negative y in the gripper frame (see the figure!).
    X_GgraspGpregrasp = RigidTransform([0, -0.08, 0])

    X_G["pick"] = X_O["initial"].multiply(X_OGgrasp)
    X_G["prepick"] = X_G["pick"].multiply(X_GgraspGpregrasp)
    X_G["place"] = X_O["goal"].multiply(X_OGgrasp)
    X_G["preplace"] = X_G["place"].multiply(X_GgraspGpregrasp)

    # I'll interpolate to a halfway orientation by converting to axis angle and halving the angle
    X_GprepickGpreplace = X_G["prepick"].inverse().multiply(X_G["preplace"])
    angle_axis = X_GprepickGpreplace.rotation().ToAngleAxis()
    X_GprepickGclearance = RigidTransform(
        AngleAxis(angle=angle_axis.angle() / 2.0, axis=angle_axis.axis()),
        X_GprepickGpreplace.translation() / 2.0 + np.array([0, -0.3, 0]))
    X_G["clearance"] = X_G["prepick"].multiply(X_GprepickGclearance)

    # Now let's set the timings.
    times = {"initial": 0.0}
    X_GinitialGprepick = X_G["initial"].inverse().multiply(X_G["prepick"])
    times["prepick"] = times["initial"] + 10.0 * \
        np.linalg.norm(X_GinitialGprepick.translation())
    # Allow some time for the gripper to close.
    times["pick_start"] = times["prepick"] + 2.0
    times["pick_end"] = times["pick_start"] + 2.0
    times["postpick"] = times["pick_end"] + 2.0
    time_to_from_clearance = 10.0 * \
        np.linalg.norm(X_GprepickGclearance.translation())
    times["clearance"] = times["postpick"] + time_to_from_clearance
    times["preplace"] = times["clearance"] + time_to_from_clearance
    times["place_start"] = times["preplace"] + 2.0
    times["place_end"] = times["place_start"] + 2.0
    times["postplace"] = times["place_end"] + 2.0

    return X_G, times
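# Hedged usage sketch with illustrative initial/goal poses (not taken from the
# original code):
X_G = {"initial": RigidTransform(RotationMatrix.MakeXRotation(-np.pi / 2.0),
                                 [0, -0.25, 0.25])}
X_O = {"initial": RigidTransform(RotationMatrix.MakeZRotation(np.pi / 2.0),
                                 [-0.2, -0.75, 0.025]),
       "goal": RigidTransform(RotationMatrix.MakeZRotation(np.pi),
                              [0.75, 0, 0.025])}
X_G, times = make_gripper_frames(X_G, X_O)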
Example #8
def addMeshEndEffector(plant, scene_graph, sphere_body):
    # Initialize sphere body
    partial_sphere_mesh = Mesh("models/partial_sphere.obj")
    if plant.geometry_source_is_registered():
        plant.RegisterCollisionGeometry(
            sphere_body,
            RigidTransform(R=RotationMatrix.MakeXRotation(np.pi / 2)),
            partial_sphere_mesh, "sphere_body",
            pydrake.multibody.plant.CoulombFriction(1, 1))
        plant.RegisterVisualGeometry(
            sphere_body,
            RigidTransform(R=RotationMatrix.MakeXRotation(np.pi / 2)),
            partial_sphere_mesh, "sphere_body", [.9, .5, .5, 1.0])  # Color

        geometries = plant.CollectRegisteredGeometries(
            [sphere_body, plant.GetBodyByName("panda_link8")])
        scene_graph.collision_filter_manager().Apply(
            CollisionFilterDeclaration().ExcludeWithin(geometries))
Example #9
    def load(self):
        """
        Loads `meshcat` visualization elements.
        @pre The `scene_graph` used to construct this object must be part of a
        fully constructed diagram (e.g. via `DiagramBuilder.Build()`).
        """
        # Intercept load message via mock LCM.
        mock_lcm = DrakeMockLcm()
        DispatchLoadMessage(self._scene_graph, mock_lcm)
        load_robot_msg = lcmt_viewer_load_robot.decode(
            mock_lcm.get_last_published_message("DRAKE_VIEWER_LOAD_ROBOT"))
        # Translate elements to `meshcat`.
        for i in range(load_robot_msg.num_links):
            link = load_robot_msg.link[i]
            [source_name, frame_name] = link.name.split("::")

            for j in range(link.num_geom):
                geom = link.geom[j]
                element_local_tf = RigidTransform(
                    RotationMatrix(Quaternion(geom.quaternion)),
                    geom.position).GetAsMatrix4()

                if geom.type == geom.BOX:
                    assert geom.num_float_data == 3
                    meshcat_geom = meshcat.geometry.Box(geom.float_data)
                elif geom.type == geom.SPHERE:
                    assert geom.num_float_data == 1
                    meshcat_geom = meshcat.geometry.Sphere(geom.float_data[0])
                elif geom.type == geom.CYLINDER:
                    assert geom.num_float_data == 2
                    meshcat_geom = meshcat.geometry.Cylinder(
                        geom.float_data[1],
                        geom.float_data[0])
                    # In Drake, cylinders are along +z
                    # In meshcat, cylinders are along +y
                    # Rotate to fix this misalignment
                    extra_rotation = tf.rotation_matrix(
                        math.pi/2., [1, 0, 0])
                    element_local_tf[0:3, 0:3] = \
                        element_local_tf[0:3, 0:3].dot(
                            extra_rotation[0:3, 0:3])
                elif geom.type == geom.MESH:
                    meshcat_geom = \
                        meshcat.geometry.ObjMeshGeometry.from_file(
                            geom.string_data[0:-3] + "obj")
                else:
                    print("UNSUPPORTED GEOMETRY TYPE ",
                          geom.type, " IGNORED")
                    continue

                self.vis[self.prefix][source_name][str(link.robot_num)][
                    frame_name][str(j)]\
                    .set_object(meshcat_geom,
                                meshcat.geometry.MeshLambertMaterial(
                                    color=Rgba2Hex(geom.color)))
                self.vis[self.prefix][source_name][str(link.robot_num)][
                    frame_name][str(j)].set_transform(element_local_tf)
Example #10
 def __init__(self, with_knife=True):
     self.table_top_z_in_world = 0.736 + 0.057 / 2
     self.manipuland_body_indices = []
     self.manipuland_params = []
     self.tabletop_indices = []
     self.guillotine_blade_index = None
     self.with_knife = with_knife
     self.model_index_dict = {}
     r, p, y = 2.4, 1.9, 3.8
     self.magic_rpy_offset = np.array([r, p, y])
     self.magic_rpy_rotmat = RotationMatrix(RollPitchYaw(r, p, y)).matrix()
Example #11
 def AddOrientationConstraint(ik, R_WG, bounds):
     """Add orientation constraint to the ik problem. Implements an inequality
     constraint where the axis-angle difference between f_R(q) and R_WG must be
     within bounds. Can be translated to:
     ik.prog().AddBoundingBoxConstraint(angle_diff(f_R(q), R_WG), -bounds, bounds)
     """
     ik.AddOrientationConstraint(frameAbar=world_frame,
                                 R_AbarA=R_WG,
                                 frameBbar=gripper_frame,
                                 R_BbarB=RotationMatrix(),
                                 theta_bound=bounds)
Example #12
    def test_X_WB(self):
        """Testing X_WB"""
        f = self.notebook_locals['compute_X_WB']

        # construct a test case
        theta1, theta2, theta3 = np.pi / 3.0, np.pi / 6.0, np.pi / 4.0
        R_WA = RotationMatrix.MakeXRotation(theta1)
        R_AB = RotationMatrix.MakeZRotation(theta2)
        R_CB = RotationMatrix.MakeYRotation(theta3)

        X_WA = RigidTransform(R_WA, [0.1, 0.2, 0.5])
        X_AB = RigidTransform(R_AB, [0.3, 0.4, 0.1])
        X_CB = RigidTransform(R_CB, [0.5, 0.9, 0.7])

        test_X_WB = f(X_WA, X_AB, X_CB)
        true_X_WB = X_WA.multiply(X_AB)

        test_result = test_X_WB.multiply(true_X_WB.inverse())
        test_result = test_result.GetAsMatrix4()
        self.assertTrue(np.allclose(test_result, np.eye(4)))
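# Hedged sketch of a compute_X_WB that would satisfy the test above; the
# notebook's actual solution may compose the frames differently (e.g. via X_CB).
def compute_X_WB(X_WA, X_AB, X_CB):
    return X_WA.multiply(X_AB)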
Example #13
 def weld_paper_edge(self, pedestal_y_dim, pedestal_z_dim):
     """
     Fixes an edge of the paper to the pedestal
     """
     # Fix paper to object
     self.plant.WeldFrames(
         self.plant.world_frame(),
         self.plant.get_body(BodyIndex(self.link_idxs[0])).body_frame(),
         RigidTransform(RotationMatrix(),
                        [0, 0, pedestal_z_dim + self.z_dim / 2])
     )
Example #14
def grasp_poses_example():
    builder = DiagramBuilder()

    plant, scene_graph = AddMultibodyPlantSceneGraph(builder, time_step=0.0)
    parser = Parser(plant, scene_graph)
    grasp = parser.AddModelFromFile(
        FindResourceOrThrow(
            "drake/manipulation/models/wsg_50_description/sdf/schunk_wsg_50_no_tip.sdf"
        ), "grasp")
    brick = parser.AddModelFromFile(
        FindResourceOrThrow(
            "drake/examples/manipulation_station/models/061_foam_brick.sdf"))
    plant.Finalize()

    meshcat = ConnectMeshcatVisualizer(builder, scene_graph)

    diagram = builder.Build()
    context = diagram.CreateDefaultContext()
    plant_context = plant.GetMyContextFromRoot(context)

    B_O = plant.GetBodyByName("base_link", brick)
    X_WO = plant.EvalBodyPoseInWorld(plant_context, B_O)

    B_Ggrasp = plant.GetBodyByName("body", grasp)
    p_GgraspO = [0, 0.12, 0]
    R_GgraspO = RotationMatrix.MakeXRotation(np.pi / 2.0).multiply(
        RotationMatrix.MakeZRotation(np.pi / 2.0))

    X_GgraspO = RigidTransform(R_GgraspO, p_GgraspO)
    X_OGgrasp = X_GgraspO.inverse()
    X_WGgrasp = X_WO.multiply(X_OGgrasp)

    plant.SetFreeBodyPose(plant_context, B_Ggrasp, X_WGgrasp)
    # Open the fingers, too
    plant.GetJointByName("left_finger_sliding_joint",
                         grasp).set_translation(plant_context, -0.054)
    plant.GetJointByName("right_finger_sliding_joint",
                         grasp).set_translation(plant_context, 0.054)

    meshcat.load()
    diagram.Publish(context)
Example #15
def addArm(plant, scene_graph=None):
    """
    Creates the panda arm.
    """
    parser = pydrake.multibody.parsing.Parser(plant, scene_graph)
    arm_instance = parser.AddModelFromFile("models/panda_arm.urdf")

    panda_offset = 0
    if config.num_links == config.NumLinks.TWO:
        panda_offset = IN_TO_M * 22
    elif config.num_links == config.NumLinks.FOUR:
        panda_offset = IN_TO_M * 40
    # Weld panda to world
    plant.WeldFrames(
        plant.world_frame(), plant.GetFrameByName("panda_link0", arm_instance),
        RigidTransform(RotationMatrix.MakeZRotation(np.pi),
                       [0, panda_offset, 0]))

    sphere_body = plant.AddRigidBody(
        "sphere_body", arm_instance,
        pydrake.multibody.tree.SpatialInertia(
            mass=MASS,
            p_PScm_E=np.array([0., 0., 0.]),
            G_SP_E=pydrake.multibody.tree.UnitInertia(1.0, 1.0, 1.0)))

    if USE_MESH:
        addMeshEndEffector(plant, scene_graph, sphere_body)
    else:
        addPrimitivesEndEffector(plant, scene_graph, sphere_body, arm_instance)

    X_P_S = RigidTransform(
        RotationMatrix.MakeZRotation(np.pi / 4).multiply(
            RotationMatrix.MakeXRotation(-np.pi / 2)),
        [0, 0, 0.065])  # Roughly aligns axes with world axes
    plant.WeldFrames(plant.GetFrameByName("panda_link8", arm_instance),
                     plant.GetFrameByName("sphere_body", arm_instance), X_P_S)

    return arm_instance
Example #16
def interpolatePosesLinear(T_world_poseA, T_world_poseB, t):
    # assume t is between 0 and 1; responsibility of caller to scale time
    p_A, r_A = T_world_poseA.translation(), T_world_poseA.rotation()
    p_B, r_B = T_world_poseB.translation(), T_world_poseB.rotation()

    # rotation is a clean slerp
    r_slerp = orientation_slerp(r_A, r_B)
    r_curr = RotationMatrix(r_slerp.value(t))

    # position is a straight line
    p_curr = p_A + t * (p_B - p_A)

    T_world_currGripper = RigidTransform(r_curr, p_curr)
    return T_world_currGripper
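# Hedged usage sketch: interpolate halfway between two illustrative poses
# (assumes the orientation_slerp helper used above is importable).
T_A = RigidTransform(RotationMatrix(), [0.0, 0.0, 0.5])
T_B = RigidTransform(RotationMatrix.MakeZRotation(np.pi / 2), [0.3, 0.0, 0.5])
T_mid = interpolatePosesLinear(T_A, T_B, t=0.5)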
Example #17
def PendulumExample():
    builder = DiagramBuilder()
    plant = builder.AddSystem(PendulumPlant())
    scene_graph = builder.AddSystem(SceneGraph())
    PendulumGeometry.AddToBuilder(builder, plant.get_state_output_port(),
                                  scene_graph)
    MeshcatVisualizerCpp.AddToBuilder(
        builder, scene_graph, meshcat,
        MeshcatVisualizerParams(publish_period=np.inf))

    builder.ExportInput(plant.get_input_port(), "torque")
    # Note: The Pendulum-v0 in gym outputs [cos(theta), sin(theta), thetadot]
    builder.ExportOutput(plant.get_state_output_port(), "state")

    # Add a camera (I have sugar for this in the manip repo)
    X_WC = RigidTransform(RotationMatrix.MakeXRotation(-np.pi / 2),
                          [0, -1.5, 0])
    rgbd = AddRgbdSensor(builder, scene_graph, X_WC)
    builder.ExportOutput(rgbd.color_image_output_port(), "camera")

    diagram = builder.Build()
    simulator = Simulator(diagram)

    def reward(system, context):
        plant_context = plant.GetMyContextFromRoot(context)
        state = plant_context.get_continuous_state_vector().CopyToVector()
        u = plant.get_input_port().Eval(plant_context)[0]
        theta = state[0] % (2 * np.pi)  # Wrap to [0, 2*pi)
        theta_dot = state[1]
        return (theta - np.pi)**2 + 0.1 * theta_dot**2 + 0.001 * u

    max_torque = 3
    env = DrakeGymEnv(simulator,
                      time_step=0.05,
                      action_space=gym.spaces.Box(low=np.array([-max_torque]),
                                                  high=np.array([max_torque])),
                      observation_space=gym.spaces.Box(
                          low=np.array([-np.inf, -np.inf]),
                          high=np.array([np.inf, np.inf])),
                      reward=reward,
                      render_rgb_port_id="camera")
    check_env(env)

    if show:
        env.reset()
        image = env.render(mode='rgb_array')
        fig, ax = plt.subplots()
        ax.imshow(image)
        plt.show()
Example #18
    def get_ik(self, t):
        epsilon = 1e-2
        ik = InverseKinematics(plant=self.plant, with_joint_limits=True)

        if self.q_guess is None:
            context = self.plant.CreateDefaultContext()
            self.q_guess = self.plant.GetPositions(context)
            # This helps get the solver out of the saddle point when the knee joints are locked (0.0)
            self.q_guess[getJointIndexInGeneralizedPositions(
                self.plant, 'l_leg_kny')] = 0.1
            self.q_guess[getJointIndexInGeneralizedPositions(
                self.plant, 'r_leg_kny')] = 0.1

        for trajectory in self.trajectories:
            position = trajectory.position_traj.value(t)
            ik.AddPositionConstraint(
                frameB=self.plant.GetFrameByName(trajectory.frame),
                p_BQ=trajectory.position_offset,
                frameA=self.plant.world_frame(),
                p_AQ_upper=position + trajectory.position_tolerance,
                p_AQ_lower=position - trajectory.position_tolerance)

            if trajectory.orientation_traj is not None:
                orientation = trajectory.orientation_traj.value(t)
                ik.AddOrientationConstraint(
                    frameAbar=self.plant.world_frame(),
                    R_AbarA=RotationMatrix.Identity(),
                    frameBbar=self.plant.GetFrameByName(trajectory.frame),
                    R_BbarB=RotationMatrix(orientation),
                    theta_bound=trajectory.orientation_tolerance)

        result = Solve(ik.prog(), self.q_guess)
        if result.is_success():
            return result.GetSolution(ik.q())
        else:
            raise Exception("Failed to find IK solution!")
Example #19
def get_box_from_geom(scene_graph, visual_only=True):
    # https://github.com/RussTedrake/underactuated/blob/master/src/underactuated/meshcat_visualizer.py
    # https://github.com/RobotLocomotion/drake/blob/master/lcmtypes/lcmt_viewer_draw.lcm
    mock_lcm = DrakeMockLcm()
    DispatchLoadMessage(scene_graph, mock_lcm)
    load_robot_msg = lcmt_viewer_load_robot.decode(
        mock_lcm.get_last_published_message("DRAKE_VIEWER_LOAD_ROBOT"))

    box_from_geom = {}
    for body_index in range(load_robot_msg.num_links):
        # 'geom', 'name', 'num_geom', 'robot_num'
        link = load_robot_msg.link[body_index]
        [source_name, frame_name] = link.name.split("::")
        model_index = link.robot_num

        visual_index = 0
        for geom in sorted(link.geom,
                           key=lambda g: g.position[::-1]):  # sort by z, y, x
            # 'color', 'float_data', 'num_float_data', 'position', 'quaternion', 'string_data', 'type'
            if visual_only and (geom.color[3] == 0):
                continue

            visual_index += 1
            if geom.type == geom.BOX:
                assert geom.num_float_data == 3
                [width, length, height] = geom.float_data
                extent = np.array([width, length, height]) / 2.
            elif geom.type == geom.SPHERE:
                assert geom.num_float_data == 1
                [radius] = geom.float_data
                extent = np.array([radius, radius, radius])
            elif geom.type == geom.CYLINDER:
                assert geom.num_float_data == 2
                [radius, height] = geom.float_data
                extent = np.array([radius, radius, height / 2.])
                # In Drake, cylinders are along +z
            else:
                continue
            link_from_box = RigidTransform(
                RotationMatrix(Quaternion(geom.quaternion)),
                geom.position).GetAsIsometry3()
            box_from_geom[model_index, frame_name, visual_index-1] = \
                (BoundingBox(np.zeros(3), extent), link_from_box, get_geom_name(geom))
    return box_from_geom
Example #20
def least_squares_transform(scene, model):
    X_BA = RigidTransform()
    mu_m = np.mean(model, axis=0)
    mu_s = np.mean(scene, axis=0)

    W = (scene - mu_s).T.dot(model - mu_m)
    U, Sigma, Vh = np.linalg.svd(W)
    R_star = U.dot(Vh)

    if np.linalg.det(R_star) < 0:
        Vh[-1] *= -1
        R_star = U.dot(Vh)

    t_star = mu_s - R_star.dot(mu_m)

    X_BA.set_rotation(RotationMatrix(R_star))
    X_BA.set_translation(t_star)

    return X_BA
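# Hedged usage sketch: recover a known rigid transform from synthetic
# correspondences (variable names are illustrative).
rng = np.random.default_rng(0)
model = rng.uniform(-1.0, 1.0, size=(100, 3))
R_true = RotationMatrix.MakeZRotation(0.3)
t_true = np.array([0.1, -0.2, 0.05])
scene = R_true.multiply(model.T).T + t_true
X_BA = least_squares_transform(scene, model)
# X_BA.rotation() should match R_true and X_BA.translation() should match t_true.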
Example #21
def interpolatePosesArcMotion(T_world_poseA, T_world_poseB, t):
    # assume t is between 0 and 1; responsibility of caller to scale time
    p_A, r_A = T_world_poseA.translation(), T_world_poseA.rotation()
    p_B, r_B = T_world_poseB.translation(), T_world_poseB.rotation()

    # rotation is a clean slerp
    r_slerp = orientation_slerp(r_A, r_B)
    r_curr = RotationMatrix(r_slerp.value(t))

    # position is an arc that isn't necessarily axis aligned
    arc_radius = p_B[2] - p_A[2]
    phi = np.arctan2(p_B[1] - p_A[1], p_B[0] - p_A[0])  #xy direction heading
    theta = (t - 1) * np.pi / 2  # -90 to 0 degrees
    p_curr = p_A + np.array([
        arc_radius * np.cos(theta) * np.cos(phi), arc_radius * np.cos(theta) *
        np.sin(phi), arc_radius * np.sin(theta) + arc_radius
    ])

    T_world_currGripper = RigidTransform(r_curr, p_curr)
    return T_world_currGripper
Example #22
    def test_grasp_pose(self):
        """Testing grasp pose"""
        f = self.notebook_locals['design_grasp_pose']
        X_WO = self.notebook_locals['X_WO']

        test_X_OG, test_X_WG = f(X_WO)

        R_OG = RotationMatrix(np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]]).T)
        p_OG = [-0.02, 0, 0]
        X_OG = RigidTransform(R_OG, p_OG)
        X_WG = X_WO.multiply(X_OG)

        test_result = test_X_OG.multiply(X_OG.inverse())
        test_result = test_result.GetAsMatrix4()
        self.assertTrue(np.allclose(test_result, np.eye(4)))

        test_result = test_X_WG.multiply(X_WG.inverse())
        test_result = test_result.GetAsMatrix4()
        self.assertTrue(np.allclose(test_result, np.eye(4)))
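# Hedged sketch of a design_grasp_pose consistent with the test above; the
# actual notebook solution may be written differently.
def design_grasp_pose(X_WO):
    R_OG = RotationMatrix(np.array([[0., 0., 1.],
                                    [1., 0., 0.],
                                    [0., 1., 0.]]).T)
    X_OG = RigidTransform(R_OG, [-0.02, 0, 0])
    X_WG = X_WO.multiply(X_OG)
    return X_OG, X_WG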
Example #23
def addPrimitivesEndEffector(plant, scene_graph, sphere_body, arm_instance):
    end_effector_RT = RigidTransform(
        p=[0, -fraction_of_lower_hemisphere * RADIUS, 0])
    if plant.geometry_source_is_registered():
        plant.RegisterCollisionGeometry(
            sphere_body, end_effector_RT, pydrake.geometry.Sphere(RADIUS),
            "sphere_body", pydrake.multibody.plant.CoulombFriction(1, 1))
        plant.RegisterVisualGeometry(sphere_body, end_effector_RT,
                                     pydrake.geometry.Sphere(RADIUS),
                                     "sphere_body", [.9, .5, .5, 1.0])  # Color

    if USE_BOX:
        partial_radius = RADIUS * (1 - fraction_of_lower_hemisphere**2)**0.5
        box_side = partial_radius * 2
        box_height = RADIUS * (1 - fraction_of_lower_hemisphere)
        box = pydrake.geometry.Box(box_side, box_side, box_height)
        box_body = plant.AddRigidBody(
            "box_body", arm_instance,
            pydrake.multibody.tree.SpatialInertia(
                mass=MASS,
                p_PScm_E=np.array([0., 0., 0.]),
                G_SP_E=pydrake.multibody.tree.UnitInertia(1.0, 1.0, 1.0)))
        if plant.geometry_source_is_registered():
            plant.RegisterCollisionGeometry(
                box_body, RigidTransform(), box, "box_body",
                pydrake.multibody.plant.CoulombFriction(1, 1))
            plant.RegisterVisualGeometry(box_body, RigidTransform(), box,
                                         "box_body",
                                         [.9, .5, .5, 1.0])  # Color

            geometries = plant.CollectRegisteredGeometries(
                [box_body, sphere_body,
                 plant.GetBodyByName("panda_link8")])
            scene_graph.collision_filter_manager().Apply(
                CollisionFilterDeclaration().ExcludeWithin(geometries))
        plant.WeldFrames(
            plant.GetFrameByName("panda_link8", arm_instance),
            plant.GetFrameByName("box_body", arm_instance),
            RigidTransform(R=RotationMatrix.MakeZRotation(np.pi / 4),
                           p=[0, 0, 0.065 - box_height / 2]))
Example #24
    def calc_x0(self, context, output):
        x0 = np.zeros(6)

        # Calc X_L_SP, the transform from the link to the setpoint
        X_L_SP = RigidTransform()  # For now, just make them the same

        # Calc X_W_L
        pose_L_rotational = self.GetInputPort("pose_L_rotational").Eval(
            context)
        pose_L_translational = self.GetInputPort("pose_L_translational").Eval(
            context)
        X_W_L = RigidTransform(p=pose_L_translational,
                               R=RotationMatrix(
                                   RollPitchYaw(pose_L_rotational)))

        # Calc X_W_SP
        X_W_SP = X_W_L.multiply(X_L_SP)

        translation = X_W_SP.translation()
        rotation = RollPitchYaw(X_W_SP.rotation()).vector()

        x0[:3] = rotation
        x0[3:] = translation
        output.SetFromVector(x0)
Example #25
        new_rbt = RigidBodyTree()
        add_single_instance_to_rbt(new_rbt, config, instance_config,
                                   instance_j)
        new_rbt.compile()
        single_object_rbts.append(new_rbt)

    all_points = []
    all_points_normals = []
    all_points_labels = []
    all_points_distances = [[] for i in range(len(config["instances"]))]

    for i, viewpoint in enumerate(camera_config["viewpoints"]):
        camera_tf = lookat(viewpoint["eye"], viewpoint["target"],
                           viewpoint["up"])
        camera_tf = camera_tf.dot(transform_inverse(depth_camera_pose))
        camera_rpy = RollPitchYaw(RotationMatrix(camera_tf[0:3, 0:3]))
        q0 = np.zeros(6)
        q0[0:3] = camera_tf[0:3, 3]
        q0[3:6] = camera_rpy.vector()
        depth_image_name = "%09d_depth.png" % (i)
        depth_image = np.array(
            imageio.imread(os.path.join(args.dir, depth_image_name))) / 1000.
        depth_image_drake = Image[PixelType.kDepth32F](depth_image.shape[1],
                                                       depth_image.shape[0])
        depth_image_drake.mutable_data[:, :, 0] = depth_image[:, :]
        points = camera.ConvertDepthImageToPointCloud(
            depth_image_drake, camera.depth_camera_info())
        good_points_mask = np.all(np.isfinite(points), axis=0)
        points = points[:, good_points_mask]
        points = np.vstack([points, np.ones([1, points.shape[1]])])
        # Transform them to camera base frame
Example #26
def AddPedestal(plant):
    """
    Creates the pedestal.
    """

    pedestal_instance = plant.AddModelInstance("pedestal_base")

    bodies = []
    names = ["left", "right"]
    x_positions = [
        (PLYWOOD_LENGTH - THICK_PLYWOOD_THICKNESS)/2,
        -(PLYWOOD_LENGTH - THICK_PLYWOOD_THICKNESS)/2
    ]

    for name, x_position in zip(names, x_positions):
        full_name = "pedestal_" + name + "_body"
        body = plant.AddRigidBody(
            full_name,
            pedestal_instance,
            SpatialInertia(mass=1, # Doesn't matter because it's welded
                            p_PScm_E=np.array([0., 0., 0.]),
                            G_SP_E=UnitInertia.SolidBox(
                                THICK_PLYWOOD_THICKNESS,
                                PEDESTAL_Y_DIM,
                                PEDESTAL_Z_DIM)
        ))

        if plant.geometry_source_is_registered():
            plant.RegisterCollisionGeometry(
                body,
                RigidTransform(),
                pydrake.geometry.Box(
                    THICK_PLYWOOD_THICKNESS,
                    PEDESTAL_Y_DIM,
                    PLYWOOD_LENGTH
                ),
                full_name,
                pydrake.multibody.plant.CoulombFriction(1, 1)
            )

            plant.RegisterVisualGeometry(
                body,
                RigidTransform(),
                pydrake.geometry.Box(
                    THICK_PLYWOOD_THICKNESS,
                    PEDESTAL_Y_DIM,
                    PLYWOOD_LENGTH
                ),
                full_name,
                [0.4, 0.4, 0.4, 1])  # RGBA color

            plant.WeldFrames(
                plant.world_frame(),
                plant.GetFrameByName(full_name, pedestal_instance),
                RigidTransform(RotationMatrix(), [
                    x_position,
                    0,
                    PLYWOOD_LENGTH / 2 + bump_z
                ]))

    # Add bump at the bottom
    full_name = "pedestal_bottom_body"
    body = plant.AddRigidBody(
        full_name,
        pedestal_instance,
        SpatialInertia(mass=1, # Doesn't matter because it's welded
                        p_PScm_E=np.array([0., 0., 0.]),
                        G_SP_E=UnitInertia.SolidBox(
                            PEDESTAL_X_DIM,
                            PEDESTAL_Y_DIM,
                            bump_z)
    ))

    if plant.geometry_source_is_registered():
        plant.RegisterCollisionGeometry(
            body,
            RigidTransform(),
            pydrake.geometry.Box(
                PEDESTAL_X_DIM,
                PEDESTAL_Y_DIM,
                bump_z
            ),
            full_name,
            pydrake.multibody.plant.CoulombFriction(1, 1)
        )

        plant.RegisterVisualGeometry(
            body,
            RigidTransform(),
            pydrake.geometry.Box(
                PEDESTAL_X_DIM,
                PEDESTAL_Y_DIM,
                bump_z
            ),
            full_name,
            [0.4, 0.4, 0.4, 1])  # RGBA color

        plant.WeldFrames(
            plant.world_frame(),
            plant.GetFrameByName(full_name, pedestal_instance),
            RigidTransform(RotationMatrix(), [
                0,
                0,
                bump_z / 2
            ]))

    return pedestal_instance
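# Hedged usage sketch (assumes the module-level constants used above, e.g.
# PLYWOOD_LENGTH, PEDESTAL_Y_DIM, and bump_z, are defined):
builder = DiagramBuilder()
plant, scene_graph = AddMultibodyPlantSceneGraph(builder, time_step=1e-3)
pedestal_instance = AddPedestal(plant)
plant.Finalize()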
Example #27
    def GetManipulatorDynamics(self, q, qd):
        # Input arguments
        # - q  = [x, y, z, q0, q1, q2, q3, xi1] in R^8
        # - qd = [vx, vy, vz, wx, wy, wz, xi2]  in R^7

        x = q[0]  # (x,y,z) in inertial frame
        y = q[1]
        z = q[2]
        q0 = q[3]  # quaternion q0 + q1*i + q2*j + q3*k
        q1 = q[4]
        q2 = q[5]
        q3 = q[6]
        xi1 = q[7]

        vx = qd[0]  # CoM velocity in inertial frame
        vy = qd[1]
        vz = qd[2]
        wx = qd[3]  # Body velocity in "body frame"
        wy = qd[4]
        wz = qd[5]
        xi2 = qd[6]

        # Stack up the full state (use a new name so the scalar x above is not clobbered)
        state_vec = np.hstack([q, qd])
        # print("state_vec:", state_vec)
        qv = np.vstack([q1, q2, q3])
        r = np.array([x, y, z])
        # print("qv",qv.shape)
        v = np.vstack([vx, vy, vz])
        quat_vec = np.vstack([q0, q1, q2, q3])
        # print("quat_vec",quat_vec.shape)
        w = np.array([wx, wy, wz])
        q_norm = np.sqrt(np.dot(quat_vec.T, quat_vec))
        q_normalized = quat_vec / q_norm
        # print("q_norm: ",q_norm)

        quat = Quaternion(q_normalized)  # Quaternion
        Rq = RotationMatrix(quat).matrix()  # Rotational matrix
        # print("\n#######")

        # Translation from w to \dot{q}
        Eq = np.zeros((3, 4))

        # Eq for body frame
        Eq[0, :] = np.array([-1 * q1, q0, 1 * q3,
                             -1 * q2])  # [-1*q1,    q0, -1*q3,    q2]
        Eq[1, :] = np.array([-1 * q2, -1 * q3, q0,
                             1 * q1])  # [-1*q2,    q3,    q0, -1*q1]
        Eq[2, :] = np.array([-1 * q3, 1 * q2, -1 * q1,
                             q0])  # [-1*q3, -1*q2,    q1,    q0]

        # Eq for world frame
        # Eq[0,:] = np.array([-1*q1,    q0, -1*q3,    q2])
        # Eq[1,:] = np.array([-1*q2,    q3,    q0, -1*q1])
        # Eq[2,:] = np.array([-1*q3, -1*q2,    q1,    q0])

        #  quat_dot = np.dot(Eq.T,w)/2.   # \dot{quat}=1/2*E(q)^Tw

        # w x (Iw)

        I = np.zeros((3, 3))  # Inertia matrix
        Ixx = self.Ixx
        Iyy = self.Iyy
        Izz = self.Izz
        I[0, 0] = Ixx
        I[1, 1] = Iyy
        I[2, 2] = Izz
        I_inv = np.linalg.inv(I)
        Iw = np.dot(I, w)
        wIw = np.cross(w.T, Iw.T).T

        return (Rq, Eq, wIw, I_inv)
Example #28
def plan_prethrow_pose(
    T_world_robotInitial,
    p_world_target,  # np.array of shape (3,)
    gripper_to_object_dist,
    throw_height=0.5,  # meters
    prethrow_height=0.2,
    prethrow_radius=0.4,
    throw_angle=np.pi / 4.0,
    meshcat=None,
    throw_speed_adjustment_factor=1.0,
):
    """
    Only works with the "back portion" of the clutter station until we figure
    out how to move the bins around. The motion moves along an arc from a
    "pre-throw" to a "throw" position.
    """
    theta = 1.0 * np.arctan2(p_world_target[1], p_world_target[0])
    print(f"theta={theta}")

    T_world_prethrow = RigidTransform(
        p=np.array([
            prethrow_radius * np.cos(theta), prethrow_radius * np.sin(theta),
            prethrow_height
        ]),
        R=RotationMatrix.MakeXRotation(-np.pi / 2).multiply(
            RotationMatrix.MakeYRotation((theta - np.pi / 2))))

    throw_radius = throw_height - prethrow_height
    T_world_throw = RigidTransform(
        p=T_world_prethrow.translation() + np.array([
            throw_radius * np.cos(theta), throw_radius * np.sin(theta),
            throw_height - prethrow_height
        ]),
        R=RotationMatrix.MakeXRotation(-np.pi / 2).multiply(
            RotationMatrix.MakeYRotation((theta - np.pi / 2)).multiply(
                RotationMatrix.MakeXRotation(-np.pi / 2))))

    if meshcat:
        visualize_transform(meshcat, "T_world_prethrow", T_world_prethrow)
        visualize_transform(meshcat, "T_world_throw", T_world_throw)

    p_world_object_at_launch = interpolatePosesArcMotion(
        T_world_prethrow, T_world_throw, t=throw_angle /
        (np.pi / 2.)).translation() + np.array([0, 0, -gripper_to_object_dist])
    pdot_world_launch = interpolatePosesArcMotion_pdot(T_world_prethrow,
                                                       T_world_throw,
                                                       t=throw_angle /
                                                       (np.pi / 2.))
    launch_speed_base = np.linalg.norm(pdot_world_launch)
    launch_speed_required = get_launch_speed_required(
        theta=throw_angle,
        x=np.linalg.norm(p_world_target[:2]) -
        np.linalg.norm(p_world_object_at_launch[:2]),
        y=p_world_target[2] - p_world_object_at_launch[2])
    total_throw_time = launch_speed_base / launch_speed_required / throw_speed_adjustment_factor
    # print(f"p_world_object_at_launch={p_world_object_at_launch}")
    # print(f"target={p_world_target}")
    # print(f"dx={np.linalg.norm(p_world_target[:2]) - np.linalg.norm(p_world_object_at_launch[:2])}")
    # print(f"dy={p_world_target[2] - p_world_object_at_launch[2]}")
    # print(f"pdot_world_launch={pdot_world_launch}")
    # print(f"total_throw_time={total_throw_time}")

    return T_world_prethrow, T_world_throw
Example #29
def plan_throw(
    T_world_robotInitial,
    T_world_gripperObject,
    p_world_target,  # np.array of shape (3,)
    gripper_to_object_dist,
    throw_height=0.5,  # meters
    prethrow_height=0.2,
    prethrow_radius=0.4,
    throw_angle=np.pi / 4.0,
    meshcat=None,
    throw_speed_adjustment_factor=1.0,
):
    """
    Only works with the "back portion" of the clutter station until we figure
    out how to move the bins around. The motion moves along an arc from a
    "pre-throw" to a "throw" position.
    """
    theta = 1.0 * np.arctan2(p_world_target[1], p_world_target[0])
    print(f"theta={theta}")

    T_world_prethrow = RigidTransform(
        p=np.array([
            prethrow_radius * np.cos(theta), prethrow_radius * np.sin(theta),
            prethrow_height
        ]),
        R=RotationMatrix.MakeXRotation(-np.pi / 2).multiply(
            RotationMatrix.MakeYRotation((theta - np.pi / 2))))

    throw_radius = throw_height - prethrow_height
    T_world_throw = RigidTransform(
        p=T_world_prethrow.translation() + np.array([
            throw_radius * np.cos(theta), throw_radius * np.sin(theta),
            throw_height - prethrow_height
        ]),
        R=RotationMatrix.MakeXRotation(-np.pi / 2).multiply(
            RotationMatrix.MakeYRotation((theta - np.pi / 2)).multiply(
                RotationMatrix.MakeXRotation(-np.pi / 2))))

    if meshcat:
        visualize_transform(meshcat, "T_world_prethrow", T_world_prethrow)
        visualize_transform(meshcat, "T_world_throw", T_world_throw)

    p_world_object_at_launch = interpolatePosesArcMotion(
        T_world_prethrow, T_world_throw, t=throw_angle /
        (np.pi / 2.)).translation() + np.array([0, 0, -gripper_to_object_dist])
    pdot_world_launch = interpolatePosesArcMotion_pdot(T_world_prethrow,
                                                       T_world_throw,
                                                       t=throw_angle /
                                                       (np.pi / 2.))
    launch_speed_base = np.linalg.norm(pdot_world_launch)
    launch_speed_required = get_launch_speed_required(
        theta=throw_angle,
        x=np.linalg.norm(p_world_target[:2]) -
        np.linalg.norm(p_world_object_at_launch[:2]),
        y=p_world_target[2] - p_world_object_at_launch[2])
    total_throw_time = launch_speed_base / launch_speed_required / throw_speed_adjustment_factor
    print(f"p_world_object_at_launch={p_world_object_at_launch}")
    print(f"target={p_world_target}")
    print(
        f"dx={np.linalg.norm(p_world_target[:2]) - np.linalg.norm(p_world_object_at_launch[:2])}"
    )
    print(f"dy={p_world_target[2] - p_world_object_at_launch[2]}")
    print(f"pdot_world_launch={pdot_world_launch}")
    print(f"total_throw_time={total_throw_time}")

    T_world_hackyWayPoint = RigidTransform(
        p=[-.6, -0.0, 0.6],
        R=RotationMatrix.MakeXRotation(
            -np.pi / 2.0
        ),  #R_WORLD_PRETHROW, #RotationMatrix.MakeXRotation(-np.pi/2.0),
    )

    # event timings (not all are relevant to pose and gripper)
    # initial pose => prethrow => throw => yays
    t_goToObj = 1.0
    t_holdObj = 0.5
    t_goToPreobj = 1.0
    t_goToWaypoint = 1.0
    t_goToPrethrow = 1.0
    t_goToRelease = total_throw_time * throw_angle / (np.pi / 2.)
    t_goToThrowEnd = total_throw_time * (1 - throw_angle / (np.pi / 2.))
    t_throwEndHold = 3.0
    ts = np.array([
        t_goToObj, t_holdObj, t_goToPreobj, t_goToWaypoint, t_goToPrethrow,
        t_goToRelease, t_goToThrowEnd, t_throwEndHold
    ])
    cum_pose_ts = np.cumsum(ts)
    print(cum_pose_ts)

    # Create pose trajectory
    t_lst = np.linspace(0, cum_pose_ts[-1], 1000)
    pose_lst = []
    for t in t_lst:
        if t < cum_pose_ts[1]:
            pose = interpolatePosesLinear(T_world_robotInitial,
                                          T_world_gripperObject,
                                          min(t / ts[0], 1.0))
        elif t < cum_pose_ts[2]:
            pose = interpolatePosesLinear(T_world_gripperObject,
                                          T_world_robotInitial,
                                          (t - cum_pose_ts[1]) / ts[2])
        elif t < cum_pose_ts[3]:
            pose = interpolatePosesLinear(T_world_robotInitial,
                                          T_world_hackyWayPoint,
                                          (t - cum_pose_ts[2]) / ts[3])
        elif t <= cum_pose_ts[4]:
            pose = interpolatePosesLinear(T_world_hackyWayPoint,
                                          T_world_prethrow,
                                          (t - cum_pose_ts[3]) / ts[4])
        else:
            pose = interpolatePosesArcMotion(
                T_world_prethrow, T_world_throw,
                min((t - cum_pose_ts[4]) / (ts[5] + ts[6]), 1.0))
        pose_lst.append(pose)

    # Create gripper trajectory.
    gripper_times_lst = np.array([
        0., t_goToObj, t_holdObj, t_goToPreobj, t_goToWaypoint, t_goToPrethrow,
        t_goToRelease, t_goToThrowEnd, t_throwEndHold
    ])
    gripper_cumulative_times_lst = np.cumsum(gripper_times_lst)
    GRIPPER_OPEN = 0.5
    GRIPPER_CLOSED = 0.0
    gripper_knots = np.array([
        GRIPPER_OPEN, GRIPPER_OPEN, GRIPPER_CLOSED, GRIPPER_CLOSED,
        GRIPPER_CLOSED, GRIPPER_CLOSED, GRIPPER_CLOSED, GRIPPER_OPEN,
        GRIPPER_CLOSED
    ]).reshape(1, gripper_times_lst.shape[0])
    g_traj = PiecewisePolynomial.FirstOrderHold(gripper_cumulative_times_lst,
                                                gripper_knots)

    return t_lst, pose_lst, g_traj
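# Hedged usage sketch: the initial/gripper poses and target point below are
# illustrative placeholders.
t_lst, pose_lst, g_traj = plan_throw(
    T_world_robotInitial=RigidTransform(
        RotationMatrix.MakeXRotation(-np.pi / 2), [0.0, -0.5, 0.5]),
    T_world_gripperObject=RigidTransform(
        RotationMatrix.MakeXRotation(-np.pi / 2), [0.0, -0.6, 0.2]),
    p_world_target=np.array([1.0, 0.5, 0.0]),
    gripper_to_object_dist=0.12)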
Example #30
# print("num_iteration,:", num_iteration)
# print("state_out dimension:,:", state_out.shape)
rpy = np.zeros((3, num_iteration))   # Euler angles converted from the quaternion states
ubar = np.zeros((4, num_iteration))  # Inputs copied from the input log
u = np.zeros((4, num_iteration))     # Inputs to plot (u[0] is overwritten from the state below)

for j in range(0, num_iteration):

    # ubar[:,j]=test_Feedback_Linearization_controller_BS(state_out[:,j])
    ubar[:, j] = input_out[:, j]
    q_temp = state_out[3:7, j]
    q_temp_norm = math.sqrt(np.dot(q_temp, q_temp))
    q_temp = q_temp / q_temp_norm
    quat_temp = Quaternion(q_temp)  # Quaternion

    R = RotationMatrix(quat_temp)
    rpy[:, j] = RollPitchYaw(R).vector()
    u[:, j] = ubar[:, j]
    u[0, j] = state_out[7, j]  # Control

# print(u)

# Visualize state and input traces
# print("times", state_log.data()[1, :])

if show_flag_q == 1:
    plt.clf()
    fig = plt.figure(1)
    fig.set_size_inches(6, 6)
    for i in range(0, 3):
        # print("i:%d" %i)
        plt.subplot(3, 1, i + 1)