Example #1
    def generate_frame_variant(self, frame, options=None):
        """Generate frame variations by perturbing the roll, pitch, and yaw of the given frame.

        Parameters
        ----------
        frame : :class:`compas.geometry.Frame`
            Frame to be perturbed.
        options : dict, optional
            Sampling options: half ranges ``delta_roll``, ``delta_pitch``, ``delta_yaw`` and
            sample sizes ``roll_sample_size``, ``pitch_sample_size``, ``yaw_sample_size``.
            Overrides ``self._options`` if provided.

        Yields
        ------
        :class:`compas.geometry.Frame`
            A frame variation.
        """
        # use the options provided in the call, falling back to the stored self._options
        options = options or self._options

        # * half range
        delta_roll = is_valid_option(options, 'delta_roll', 0.)
        delta_pitch = is_valid_option(options, 'delta_pitch', 0.)
        delta_yaw = is_valid_option(options, 'delta_yaw', 0.)

        # * discretization
        roll_sample_size = is_valid_option(options, 'roll_sample_size', 1)
        pitch_sample_size = is_valid_option(options, 'pitch_sample_size', 1)
        yaw_sample_size = is_valid_option(options, 'yaw_sample_size', 1)

        roll_gen = np.linspace(-delta_roll, delta_roll, num=roll_sample_size)
        pitch_gen = np.linspace(-delta_pitch, delta_pitch, num=pitch_sample_size)
        yaw_gen = np.linspace(-delta_yaw, delta_yaw, num=yaw_sample_size)
        pose = pose_from_frame(frame)
        # a finite generator
        for roll, pitch, yaw in product(roll_gen, pitch_gen, yaw_gen):
            yield frame_from_pose(multiply(pose, Pose(euler=Euler(roll=roll, pitch=pitch, yaw=yaw))))
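A minimal usage sketch for the generator above; `sampler` stands for an instance of the enclosing class and `frame` for a compas Frame, both hypothetical here, and the option values are illustrative:

options = {'delta_yaw': np.pi / 12, 'yaw_sample_size': 5}
for frame_variant in sampler.generate_frame_variant(frame, options=options):
    # with the roll/pitch defaults (zero range, sample size 1), this should yield 1 * 1 * 5 = 5 yaw-perturbed frames
    print(frame_variant)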
def compute_circle_path(circle_center=np.array([2, 0, 0.2]),
                        circle_r=0.2,
                        angle_range=(-0.5 * np.pi, 0.5 * np.pi)):
    # generate a circle path to test IK and Cartesian planning
    ee_poses = []
    n_pt = int(abs(angle_range[1] - angle_range[0]) / (np.pi / 180 * 5))
    for a in np.linspace(*angle_range, num=n_pt):
        pt = circle_center + circle_r * np.array([np.cos(a), np.sin(a), 0])
        circ_pose = multiply(Pose(point=pt, euler=Euler(yaw=a + np.pi / 2)),
                             Pose(euler=Euler(roll=np.pi * 3 / 4)))
        draw_pose(circ_pose, length=0.01)
        ee_poses.append(circ_pose)
    return ee_poses
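A hedged usage sketch for compute_circle_path: with the defaults above, the half circle is sampled at roughly one pose per 5 degrees (a pybullet client must already be connected, since draw_pose is called inside):

ee_poses = compute_circle_path(circle_center=np.array([2, 0, 0.2]),
                               circle_r=0.2,
                               angle_range=(-0.5 * np.pi, 0.5 * np.pi))
print('number of path points: {}'.format(len(ee_poses)))  # one pose per ~5 degrees over the half circle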
def sequenced_picknplace_plan(assembly_pkg_json_path, solve_method='sparse_ladder_graph', viewer=False, scale=1e-3,
    sample_time=5, sparse_time_out=2, jt_res=0.1, pos_step_size=0.01, ori_step_size=np.pi/16,
    viz_inspect=False, warning_pause=False, save_dir=None, **kwargs):
    """Call pychoreo's planner to solve for picknplace Cartesian & transition trajectories.

    Robot setup is specified in coop_assembly.choreo_interface.robot_setup

    Parameters
    ----------
    assembly_pkg_json_path : str
        path to the assembly package json file describing the elements to be assembled
    solve_method : str, optional
        Cartesian solver to use, 'ladder_graph' or 'sparse_ladder_graph', by default 'sparse_ladder_graph'
    viewer : bool, optional
        open a pybullet GUI during planning, by default False
    scale : float, optional
        scale applied to the unit geometries loaded from the assembly package, by default 1e-3
    sample_time : int, optional
        time budget for sampling vertices in the sparse ladder graph, by default 5
    sparse_time_out : int, optional
        timeout for the sparse sampling stage of the sparse ladder graph, by default 2
    jt_res : float, optional
        joint resolution for transition planning, by default 0.1
    pos_step_size : float, optional
        interpolation step size for the end effector Cartesian movements, by default 0.01 (meter)
    ori_step_size : float, optional
        interpolation step size for the end effector orientation, by default np.pi/16
    viz_inspect : bool, optional
        visualize planning process in a pybullet window (slow!), by default False
    warning_pause : bool, optional
        wait for user input if any of the planning processes cannot find a solution, by default False
    save_dir : string, optional
        absolute directory path to save the planning result json file, by default None

    Returns
    -------
    None
        the planned trajectories are exported to a result json file under ``save_dir``
        and optionally replayed in the viewer

    Raises
    ------
    ValueError
        if ``solve_method`` is not one of the supported solve methods
    """
    # TODO: assert solve method in available list

    # * load robot setup data
    (robot_urdf, base_link_name, tool_root_link_name, ee_link_name, ik_joint_names, disabled_self_collision_link_names), \
    (workspace_urdf, workspace_robot_disabled_link_names) = get_picknplace_robot_data()
    picknplace_end_effector_urdf = get_picknplace_end_effector_urdf()
    picknplace_tcp_def = get_picknplace_tcp_def()

    # * create robot and pb environment
    connect(use_gui=viewer)

    # * adjust camera pose (optional)
    if has_gui():
        camera_base_pt = (0,0,0)
        camera_pt = np.array(camera_base_pt) + np.array([1, 0, 0.5])
        set_camera_pose(tuple(camera_pt), camera_base_pt)

    with HideOutput():
        # * pybullet can handle ROS-package path URDF automatically now (ver 2.5.7)!
        robot = load_pybullet(robot_urdf, fixed_base=True)
        workspace = load_pybullet(workspace_urdf, fixed_base=True)
        # ee_body = create_obj(picknplace_end_effector_urdf)
        ee_body = load_pybullet(picknplace_end_effector_urdf)

    # * set robot idle configuration
    ik_joints = joints_from_names(robot, ik_joint_names)
    robot_start_conf = get_robot_init_conf()
    set_joint_positions(robot, ik_joints, robot_start_conf)

    # * create tool and tool TCP from flange (tool0) transformation
    root_link = link_from_name(robot, tool_root_link_name)
    # create end effector body
    ee_attach = Attachment(robot, root_link, unit_pose(), ee_body)
    # set up TCP transformation, just a renaming here
    root_from_tcp = picknplace_tcp_def
    if has_gui():
        # draw TCP pose
        ee_attach.assign()
        ee_link_pose = get_pose(ee_attach.child)
        draw_pose(multiply(ee_link_pose, root_from_tcp))

    # * specify ik fn wrapper
    ik_fn = IK_MODULE.get_ik
    def get_sample_ik_fn(robot, ik_fn, ik_joint_names, base_link_name, tool_from_root=None):
        def sample_ik_fn(world_from_tcp):
            if tool_from_root:
                world_from_tcp = multiply(world_from_tcp, tool_from_root)
            return sample_tool_ik(ik_fn, robot, ik_joint_names, base_link_name, world_from_tcp, get_all=True) #,sampled=[0])
        return sample_ik_fn
    # ik generation function stays the same for all cartesian processes
    sample_ik_fn = get_sample_ik_fn(robot, ik_fn, ik_joint_names, base_link_name, invert(root_from_tcp))

    # * load shape & collision data
    with open(assembly_pkg_json_path, 'r') as f:
        json_data = json.loads(f.read())
    assembly = Assembly.from_package(json_data)
    elements = assembly.elements
    for element in elements.values():
        for unit_geo in element.unit_geometries:
            unit_geo.rescale(scale)
    # TODO: scale derived from the assembly package unit
    # static_obstacles = []
    # for ug in assembly.static_obstacle_geometries.values():
    #     static_obstacles.extend(ug.mesh)

    # * load precomputed sequence / use assigned sequence
    # TODO: load this as function argument
    # element_seq = elements.keys()
    element_seq = [0, 1, 2, 3, 4, 5]
    # element_seq = [3, 4, 5]
    print('sequence: ', element_seq)

    # visualize goal pose
    if has_gui():
        # viz_len = 0.003
        with WorldSaver():
            for e_id in element_seq:
                element = elements[e_id]
                with LockRenderer():
                    print('e_id #{} : {}'.format(e_id, element))
                    for unit_geo in element.unit_geometries:
                        for pb_geo in unit_geo.pybullet_bodies:
                            set_pose(pb_geo, random.choice(unit_geo.get_goal_frames(get_pb_pose=True)))
                # print('---------')
                # wait_for_user()
        # wait_for_user()

    # * construct ignored body-body links for collision checking
    # in this case, including self-collision between links of the robot
    disabled_self_collisions = get_disabled_collisions(robot, disabled_self_collision_link_names)
    # and links between the robot and the workspace (e.g. robot_base_link to base_plate)
    extra_disabled_collisions = get_body_body_disabled_collisions(robot, workspace, workspace_robot_disabled_link_names)
    # TODO: extra disabled collisions as function argument
    extra_disabled_collisions.update({
        ((robot, link_from_name(robot, 'robot_link_5')), (ee_body, link_from_name(ee_body, 'eef_base_link'))),
        ((robot, link_from_name(robot, 'robot_link_6')), (ee_body, link_from_name(ee_body, 'eef_base_link')))
        })

    # * create cartesian processes without a sequence being given, with random pose generators
    cart_process_seq = build_picknplace_cartesian_process_seq(
        element_seq, elements,
        robot, ik_joint_names, root_link, sample_ik_fn,
        ee_attachs=[ee_attach], self_collisions=True, disabled_collisions=disabled_self_collisions,
        obstacles=[workspace],extra_disabled_collisions=extra_disabled_collisions,
        tool_from_root=invert(root_from_tcp), viz_step=False, pick_from_same_rack=True,
        pos_step_size=pos_step_size, ori_step_size=ori_step_size)

    # specifically for UR5, because of its wide joint range, we need to apply joint value snapping
    for cp in cart_process_seq:
        cp.target_conf = robot_start_conf

    with LockRenderer(not viz_inspect):
        if solve_method == 'ladder_graph':
            print('\n'+'#' * 10)
            print('Solving with the vanilla ladder graph search algorithm.')
            cart_process_seq = solve_ladder_graph_from_cartesian_process_list(cart_process_seq,
                verbose=True, warning_pause=warning_pause, viz_inspect=viz_inspect, check_collision=False, start_conf=robot_start_conf)
        elif solve_method == 'sparse_ladder_graph':
            print('\n'+'#' * 10)
            print('Solving with the sparse ladder graph search algorithm.')
            sparse_graph = SparseLadderGraph(cart_process_seq)
            sparse_graph.find_sparse_path(verbose=True, vert_timeout=sample_time, sparse_sample_timeout=sparse_time_out)
            cart_process_seq = sparse_graph.extract_solution(verbose=True, start_conf=robot_start_conf)
        else:
            raise ValueError('Invalid solve method!')
        assert all(isinstance(cp, CartesianProcess) for cp in cart_process_seq)

        pnp_trajs = [[] for _ in range(len(cart_process_seq))]
        for cp_id, cp in enumerate(cart_process_seq):
            element_attachs = []
            for sp_id, sp in enumerate(cp.sub_process_list):
                assert sp.trajectory, '{}-{} does not have a Cartesian plan found!'.format(cp, sp)
                # ! reverse engineer the grasp pose
                if sp.trajectory.tag == 'pick_retreat':
                    unit_geo = elements[sp.trajectory.element_id].unit_geometries[0]
                    e_bodies = unit_geo.pybullet_bodies
                    for e_body in e_bodies:
                        set_pose(e_body, unit_geo.get_initial_frames(get_pb_pose=True)[0])
                        set_joint_positions(sp.trajectory.robot, sp.trajectory.joints, sp.trajectory.traj_path[0])
                        element_attachs.append(create_attachment(sp.trajectory.robot, root_link, e_body))

                if sp.trajectory.tag == 'pick_retreat' or sp.trajectory.tag == 'place_approach':
                    sp.trajectory.attachments = element_attachs
                pnp_trajs[cp_id].append(sp.trajectory)
        full_trajs = pnp_trajs

        # * transition motion planning between the pick-and-place processes
        return2idle = True
        transition_traj = solve_transition_between_picknplace_processes(pnp_trajs, elements, robot_start_conf,
                                                                        disabled_collisions=disabled_self_collisions,
                                                                        extra_disabled_collisions=extra_disabled_collisions,
                                                                        obstacles=[workspace], return2idle=return2idle,
                                                                        resolutions=[jt_res]*len(ik_joints),
                                                                        **kwargs)

    # * weave the Cartesian and transition processes together
    for cp_id, print_trajs in enumerate(full_trajs):
        print_trajs.insert(0, transition_traj[cp_id][0])
        print_trajs.insert(3, transition_traj[cp_id][1])
    if return2idle:
        full_trajs[-1].append(transition_traj[-1][-1])

    if save_dir is None:
        here = os.path.dirname(__file__)
        save_dir = os.path.join(here, 'results')
    export_trajectory(save_dir, full_trajs, ee_link_name, indent=1, shape_file_path=assembly_pkg_json_path,
        include_robot_data=True, include_link_path=True)

    # * disconnect and close pybullet engine used for planning, visualizing trajectories will start a new one
    reset_simulation()
    disconnect()

    if viewer:
        cart_time_step = None
        tr_time_step = None
        display_picknplace_trajectories(robot_urdf, ik_joint_names,
                                        assembly_pkg_json_path, full_trajs, tool_root_link_name,
                                        ee_urdf=picknplace_end_effector_urdf, workspace_urdf=workspace_urdf, animate=True,
                                        cart_time_step=cart_time_step, tr_time_step=tr_time_step)
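A hedged invocation sketch for the planner above; the paths are placeholders, and the result is written to a json file under save_dir via export_trajectory rather than being returned:

sequenced_picknplace_plan('/path/to/assembly_package.json',
                          solve_method='sparse_ladder_graph',
                          viewer=True,
                          save_dir='/path/to/results')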
 def sample_ik_fn(world_from_tcp):
     # convert the TCP target into the corresponding tool root (flange) pose expected by the IK function
     if tool_from_root:
         world_from_tcp = multiply(world_from_tcp, tool_from_root)
     return sample_tool_ik(ik_fn, robot, ik_joint_names, base_link_name, world_from_tcp, get_all=True) #,sampled=[0])
Example #5
def descartes_demo(viewer=True, scaling_test=False, draw_graph=True):
    connect(use_gui=viewer)
    with HideOutput():
        robot = load_pybullet(KUKA_ROBOT_URDF, fixed_base=True)

    # * adjust camera pose (optional)
    # has_gui checks if the GUI mode is enabled
    if has_gui():
        camera_base_pt = (0,0,0)
        camera_pt = np.array(camera_base_pt) + np.array([1, -0.5, 1])
        set_camera_pose(tuple(camera_pt), camera_base_pt)

    cprint('Hello robot world! <ctrl+left mouse> to pan', 'green')
    wait_if_gui()

    ik_joints = get_movable_joints(robot)
    ik_joint_names = get_joint_names(robot, ik_joints)
    print('ik joint names: {}'.format(ik_joint_names))

    lower_limits, upper_limits = get_custom_limits(robot, ik_joints)
    print('joint lower limit: {}'.format(lower_limits))
    print('joint upper limit: {}'.format(upper_limits))
    # we could also read these velocities from the SRDF file (using e.g. COMPAS_FAB),
    # but I'm a bit lazy, so let's just hardcode the values here
    vel_limits = {0 : 6.28318530718,
                  1 : 5.23598775598,
                  2 : 6.28318530718,
                  3 : 6.6497044501,
                  4 : 6.77187749774,
                  5 : 10.7337748998}

    # set the robot to a "comfortable" start configuration, optional
    robot_start_conf = [0,-np.pi/2,np.pi/2,0,0,0]
    set_joint_positions(robot, ik_joints, robot_start_conf)

    tool_link = link_from_name(robot, EE_LINK_NAME)
    robot_base_link = link_from_name(robot, ROBOT_BASE_LINK_NAME)
    # tool_from_root = get_relative_pose(robot, root_link, tool_link)

    # * draw EE pose
    if has_gui():
        tcp_pose = get_link_pose(robot, tool_link)
        draw_pose(tcp_pose)

    circle_center = np.array([0.6, 0, 0.2])
    circle_r = 0.1
    # * generate multiple circles
    ee_poses = []
    full_angle = 2*2*np.pi
    # total number of path points, one path point per 5 degrees
    n_pt = int(full_angle / (np.pi/180 * 5))
    # full_angle = np.pi
    for a in np.linspace(0.0, full_angle, num=n_pt):
        pt = circle_center + circle_r*np.array([np.cos(a), np.sin(a), 0])
        circ_pose = multiply(Pose(point=pt, euler=Euler(yaw=a+np.pi/2)), Pose(euler=Euler(roll=np.pi*3/4)))
        draw_pose(circ_pose, length=0.01)
        ee_poses.append(circ_pose)

    # def get_ee_sample_fn(roll_gen, pitch_gen, yaw_gen):
    #     def ee_sample_fn(ee_pose):
    #         # a finite generator
    #         for roll, pitch, yaw in product(roll_gen, pitch_gen, yaw_gen):
    #             yield multiply(ee_pose, Pose(euler=Euler(roll=roll, pitch=pitch, yaw=yaw)))
    #     return ee_sample_fn
    # roll_sample_size = 3
    # pitch_sample_size = 3
    # yaw_sample_size = 1
    # delta_roll = np.pi/12
    # delta_pitch = np.pi/12
    # roll_gen = np.linspace(-delta_roll, delta_roll, num=roll_sample_size)
    # pitch_gen = np.linspace(-delta_pitch, delta_pitch, num=pitch_sample_size)
    # yaw_gen = np.linspace(0.0, 2*np.pi, num=yaw_sample_size)
    # ee_sample_fn = get_ee_sample_fn(roll_gen, pitch_gen, yaw_gen)

    # for ep in ee_sample_fn(ee_poses[0]):
    #     draw_pose(ep, length=0.03)

    # * baseline, keeping the EE z axis rotational dof fixed
    st_time = time.time()
    path = plan_cartesian_motion(robot, robot_base_link, tool_link, ee_poses)
    print('Solving time: {}'.format(elapsed_time(st_time)))
    if path is None:
        cprint('Gradient-based ik cartesian planning cannot find a plan!', 'red')
    else:
        cprint('Gradient-based ik cartesian planning found a plan!', 'green')
        time_step = 0.03
        for conf in path:
            set_joint_positions(robot, ik_joints, conf)
            wait_for_duration(time_step)
        if draw_graph:
            print('drawing graph...')
            plot_joint_val(path, lower_limits, upper_limits, method='GradIK')
    print('='*20)

    # * Now, let's see if using the ladder graph without releasing the EE dof can give us a good trajectory
    # you should be doing something similar in your picture, right?

    # First, we need ikfast to obtain all the IK solution variants, the same as in Descartes
    ik_fn = ikfast_kuka_kr6_r900.get_ik

    # we have to specify an ik fn wrapper and feed it into pychoreo
    def get_sample_ik_fn(robot, ik_fn, robot_base_link, ik_joints, tool_from_root=None):
        def sample_ik_fn(world_from_tcp):
            if tool_from_root:
                world_from_tcp = multiply(world_from_tcp, tool_from_root)
            return sample_tool_ik(ik_fn, robot, ik_joints, world_from_tcp, robot_base_link, get_all=True)
        return sample_ik_fn
    # ik generation function stays the same for all cartesian processes
    sample_ik_fn = get_sample_ik_fn(robot, ik_fn, robot_base_link, ik_joints)

    # we ignore self-collision in this tutorial, so the collision_fn only considers joint limits for now
    # See: https://github.com/yijiangh/pybullet_planning/blob/dev/tests/test_collisions.py
    # for more examples of using the collision function
    collision_fn = get_collision_fn(robot, ik_joints, obstacles=[],
                                       attachments=[], self_collisions=False,
                                    #    disabled_collisions=disabled_collisions,
                                    #    extra_disabled_collisions=extra_disabled_collisions,
                                       custom_limits={})

    # Let's check if our ik sampler is working properly
    # uncomment the following if you want to play with this
    # for p in ee_poses:
    #     print('-'*5)
    #     pb_q = inverse_kinematics(robot, tool_link, p)
    #     if pb_q is None:
    #         cprint('pb ik can\'t find an ik solution', 'red')
    #     qs = sample_ik_fn(p)
    #     if qs is not None:
    #         cprint('But Ikfast does find one! {}'.format(qs[0]), 'green')
    #         # set_joint_positions(robot, ik_joints, qs[0])
    #         # wait_if_gui()
    #     else:
    #         cprint('ikfast can\'t find an ik solution', 'red')

    # * Ok, now we have the ik sampler and collision function ready, let's see if we can find a valid cartesian trajectory!
    ee_vel = 0.005 # m/s

    # st_time = time.time()
    # path, cost = plan_cartesian_motion_lg(robot, ik_joints, ee_poses, sample_ik_fn, collision_fn, \
    #     custom_vel_limits=vel_limits, ee_vel=ee_vel)
    # print('Solving time: {}'.format(elapsed_time(st_time)))
    # if path is None:
    #     cprint('ladder graph (w/o releasing dof) cartesian planning cannot find a plan!', 'red')
    # else:
    #     cprint('ladder graph (w/o releasing dof) cartesian planning find a plan!', 'green')
    #     cprint('Cost: {}'.format(cost), 'yellow')
    #     time_step = 0.03
    #     for conf in path:
    #         # cprint('conf: {}'.format(conf))
    #         set_joint_positions(robot, ik_joints, conf)
    #         wait_for_duration(time_step)
    #     if draw_graph:
    #         print('drawing graph...')
    #         plot_joint_val(path, lower_limits, upper_limits, method='IKFast+LadderGraph')
    # print('='*20)

    # * Now, let's see if releasing EE z axis dof can bring down the cost
    # First, let's build an end effector pose sampler to release the EE z axis dof!
    ## * EE z axis dof generator
    def get_ee_sample_fn(roll_gen, pitch_gen, yaw_gen):
        def ee_sample_fn(ee_pose):
            # a finite generator
            for roll, pitch, yaw in product(roll_gen, pitch_gen, yaw_gen):
                yield multiply(ee_pose, Pose(euler=Euler(roll=roll, pitch=pitch, yaw=yaw)))
        return ee_sample_fn

    # by increasing these sample sizes we can see the cost go down
    roll_sample_size = 10
    pitch_sample_size = 10
    yaw_sample_size = 10
    delta_roll = np.pi/6
    delta_pitch = np.pi/6
    delta_yaw = np.pi/6
    roll_gen = np.linspace(-delta_roll, delta_roll, num=roll_sample_size)
    pitch_gen = np.linspace(-delta_pitch, delta_pitch, num=pitch_sample_size)
    yaw_gen = np.linspace(-delta_yaw, delta_yaw, num=yaw_sample_size)
    ee_sample_fn = get_ee_sample_fn(roll_gen, pitch_gen, yaw_gen)

    st_time = time.time()
    path, cost = plan_cartesian_motion_lg(robot, ik_joints, ee_poses, sample_ik_fn, collision_fn, sample_ee_fn=ee_sample_fn, \
        custom_vel_limits=vel_limits, ee_vel=ee_vel)
    print('Solving time: {}'.format(elapsed_time(st_time)))

    # TODO: plot ee z axis deviation plot

    # the ladder graph cost is just the sum of all adjacent joint differences,
    # so the following assertion should hold
    # conf_array = np.array(path)
    # conf_diff = np.abs(conf_array[:-1,:] - conf_array[1:,:])
    # np_cost = np.sum(conf_diff)
    # assert np.allclose(np_cost, cost), '{} - {}'.format(np_cost, cost)

    if path is None:
        cprint('ladder graph (releasing EE z dof) cartesian planning cannot find a plan!', 'red')
    else:
        cprint('ladder graph (releasing EE z dof) cartesian planning found a plan!', 'cyan')
        cprint('Cost: {}'.format(cost), 'yellow')
        time_step = 0.03
        for conf in path:
            # cprint('conf: {}'.format(conf))
            set_joint_positions(robot, ik_joints, conf)
            wait_for_duration(time_step)
        if draw_graph:
            print('drawing graph...')
            plot_joint_val(path, lower_limits, upper_limits, method='IKFast+LadderGraph+release z dof-res{}'.format(yaw_sample_size))
    print('='*20)

    # Now, let's do a scaling test:
    # if scaling_test:
        # res_sc = list(range(4,30))
        # cost_sc = []
        # for res in res_sc:
        #     yaw_sample_size = res
        #     yaw_gen = np.linspace(0.0, 2*np.pi, num=yaw_sample_size)
        #     ee_sample_fn = get_ee_sample_fn(yaw_gen)
        #     path, cost = plan_cartesian_motion_lg(robot, ik_joints, ee_poses, sample_ik_fn, collision_fn, sample_ee_fn=ee_sample_fn)
        #     assert path is not None
        #     cost_sc.append(cost)
        #     if res % 5 == 0:
        #         if draw_graph:
        #             plot_joint_val(path, lower_limits, upper_limits, method='IKFast+LadderGraph+release z dof-res{}'.format(yaw_sample_size))

        # fig, ax = plt.subplots()
        # ax.plot(res_sc, cost_sc)
        # ax.set(xlabel='yaw angle discr resolution', ylabel='total joint difference cost',
        #        title='Total joint diff scaling test')
        # ax.grid()
        # fig.savefig(os.path.join(HERE, 'images', 'tota_diff-scaling.png'))
        # # plt.show()

    wait_if_gui('Press enter to exit')
Example #6
 def ee_sample_fn(ee_pose):
     # a finite generator: yields one rotated copy of ee_pose per (roll, pitch, yaw) combination
     for roll, pitch, yaw in product(roll_gen, pitch_gen, yaw_gen):
         yield multiply(ee_pose, Pose(euler=Euler(roll=roll, pitch=pitch, yaw=yaw)))
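A minimal sketch of how the three captured generators might be built before using the closure above (mirroring Example #5; the half ranges and sample sizes are illustrative):

roll_gen = np.linspace(-np.pi / 6, np.pi / 6, num=10)
pitch_gen = np.linspace(-np.pi / 6, np.pi / 6, num=10)
yaw_gen = np.linspace(-np.pi / 6, np.pi / 6, num=10)
# each call to ee_sample_fn(ee_pose) then yields 10 * 10 * 10 = 1000 perturbed poses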
Example #7
 def sample_ik_fn(world_from_tcp):
     # map the TCP target back to the flange (tool root) pose if a tool offset is given, then query all IK solutions
     if tool_from_root:
         world_from_tcp = multiply(world_from_tcp, tool_from_root)
     return sample_tool_ik(ik_fn, robot, ik_joints, world_from_tcp, robot_base_link, get_all=True)
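As in Example #5, the variables captured by this closure are typically prepared beforehand; a minimal sketch under the same assumptions (KUKA ikfast module and pybullet_planning helpers):

ik_fn = ikfast_kuka_kr6_r900.get_ik                      # analytical IK, as in Example #5
ik_joints = get_movable_joints(robot)
robot_base_link = link_from_name(robot, ROBOT_BASE_LINK_NAME)
# tool_from_root maps a TCP target back to the flange (tool root) pose when the IK is defined
# at the flange; leave it as None if the target poses are flange poses already
tool_from_root = None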
def ur_demo(viewer=True, robot_path=UR_ROBOT_URDF, ee_path=EE_PATH, \
    workspace_path=MIT_WORKSPACE_PATH, attach_obj_path=ATTACH_OBJ_PATH, obstacle_obj_path=OBSTACLE_OBJ_PATH):

    # * this will create the pybullet GUI
    # setting viewer=False will enter GUI-free mode
    connect(use_gui=viewer)
    cprint(
        "Welcome to pybullet! <Ctrl+left mouse> to rotate, <Ctrl+middle mouse> to move the camera, <Ctrl+right mouse> to zoom",
        'green')
    cprint('But the space is empty, let\'s load our robot!', 'yellow')
    # wait_for_user is your friend! It pauses the console while a separate thread keeps
    # the GUI running, so you can still rotate the view and look around
    wait_for_user()

    # * This is how we load a robot from a URDF, a workspace from a URDF, or simply a mesh object from an obj file
    # Notice that pybullet uses *METERS* by default, so make sure you scale things properly!
    robot = load_pybullet(robot_path, fixed_base=True)
    workspace = load_pybullet(workspace_path, fixed_base=True)
    ee_body = create_obj(ee_path)

    # this will print all the bodies' information in your console
    dump_world()
    cprint(
        'You just loaded a robot, a workspace (with many static objects as its links, I modeled our good old MIT 3-412 shop here), '
        + 'and an end effector (it\'s inside the robot base now)', 'green')
    wait_for_user()

    # * adjust camera pose (optional)
    # has_gui checks if the GUI mode is enabled
    if has_gui():
        camera_base_pt = (0, 0, 0)
        camera_pt = np.array(camera_base_pt) + np.array([1, -0.5, 0.5])
        set_camera_pose(tuple(camera_pt), camera_base_pt)

    # * each joint of the robot is assigned an integer index in pybullet
    ik_joints = get_movable_joints(robot)
    ik_joint_names = get_joint_names(robot, ik_joints)
    cprint('Joints {}\ncorrespond to:\n{}'.format(ik_joints, ik_joint_names),
           'green')
    robot_start_conf = [0, -1.65715, 1.71108, -1.62348, 0, 0]
    cprint("This is before updating pose", 'yellow')
    wait_for_user()
    # * set joint configuration, the robot's pose will be updated
    set_joint_positions(robot, ik_joints, robot_start_conf)
    cprint("This is after setting the joint positions: {}".format(robot_start_conf),
           'green')
    wait_for_user()

    tool_attach_link_name = 'ee_link'
    tool_attach_link = link_from_name(robot, tool_attach_link_name)

    # * attach the end effector
    ee_link_pose = get_link_pose(robot, tool_attach_link)
    set_pose(ee_body, ee_link_pose)
    ee_attach = create_attachment(robot, tool_attach_link, ee_body)
    # we need to call "assign()" to update the attachment to the current end effector pose
    ee_attach.assign()

    # let's load a bar element and a box-shaped obstacle (both from obj files) into the world
    attached_bar_body = create_obj(attach_obj_path)
    box_body = create_obj(obstacle_obj_path)
    cprint('We loaded a box to our scene!', 'green')
    wait_for_user()

    # * attach the bar
    ee_link_from_tcp = Pose(point=(0.094, 0, 0))
    set_pose(attached_bar_body, multiply(ee_link_pose, ee_link_from_tcp))
    bar_attach = create_attachment(robot, tool_attach_link, attached_bar_body)
    bar_attach.assign()
    cprint('The bar element is attached to the robot', 'green')
    wait_for_user()

    attachments = [ee_attach, bar_attach]

    # * Let's do some collision checking
    # * specify disabled link pairs for collision checking (because they are adjacent / impossible to collide)
    # the link names correspond to the ones specified in the URDF
    # again, each robot link is assigned an integer index in pybullet
    robot_self_collision_disabled_link_names = [
        ('base_link', 'shoulder_link'), ('ee_link', 'wrist_1_link'),
        ('ee_link', 'wrist_2_link'), ('ee_link', 'wrist_3_link'),
        ('forearm_link', 'upper_arm_link'), ('forearm_link', 'wrist_1_link'),
        ('shoulder_link', 'upper_arm_link'), ('wrist_1_link', 'wrist_2_link'),
        ('wrist_1_link', 'wrist_3_link'), ('wrist_2_link', 'wrist_3_link')
    ]
    self_collision_links = get_disabled_collisions(
        robot, robot_self_collision_disabled_link_names)
    cprint('self_collision_links disabled: {}'.format(self_collision_links),
           'yellow')

    extra_disabled_link_names = [('base_link', 'MIT_3412_robot_base_plate'),
                                 ('shoulder_link', 'MIT_3412_robot_base_plate')
                                 ]
    extra_disabled_collisions = get_body_body_disabled_collisions(
        robot, workspace, extra_disabled_link_names)
    cprint('extra disabled: {}'.format(extra_disabled_collisions), 'yellow')

    print('#' * 10)
    cprint('Checking robot links self-collision', 'green')
    collision_fn = get_collision_fn(robot,
                                    ik_joints,
                                    obstacles=[],
                                    attachments=attachments,
                                    self_collisions=True,
                                    disabled_collisions=self_collision_links)
    conf = [-1.029744, -1.623156, 2.844887, -0.977384, 1.58825, 0.314159]
    # self-collision: this should return True
    # this function will first set the robot's joint configuration (together with the attachments)
    # and then check collisions
    # notice that turning on the diagnosis flag here will show you where the collision is happening
    cprint(
        'Notice that the diagnosis mode will zoom the camera to where the collision is detected. Move the camera around if you can\'t see what\'s going on there.',
        'yellow')
    assert collision_fn(conf, diagnosis=True)

    print('#' * 10)
    cprint('Checking robot links - holding attachment self-collision', 'green')
    collision_fn = get_collision_fn(
        robot,
        ik_joints,
        obstacles=[],
        attachments=attachments,
        self_collisions=True,
        disabled_collisions=self_collision_links,
        extra_disabled_collisions=extra_disabled_collisions)
    conf = [0.03500, -2.26900, 2.44300, 1.117, 1.6579, 0.105]
    assert collision_fn(conf, diagnosis=True)
    print('\n')

    print('#' * 10)
    cprint('Checking robot links to obstacles (w/o links) collision', 'green')
    collision_fn = get_collision_fn(
        robot,
        ik_joints,
        obstacles=[box_body],
        attachments=attachments,
        self_collisions=True,
        disabled_collisions=self_collision_links,
        extra_disabled_collisions=extra_disabled_collisions)
    conf = [
        -0.105, -0.76800000000000002, 1.292, -0.61099999999999999, 1.484, 0.105
    ]
    assert collision_fn(conf, diagnosis=True)
    print('\n')

    print('#' * 10)
    cprint('Checking robot links to multi-link obstacle collision', 'green')
    collision_fn = get_collision_fn(
        robot,
        ik_joints,
        obstacles=[workspace],
        attachments=[],
        self_collisions=True,
        disabled_collisions=self_collision_links,
        extra_disabled_collisions=extra_disabled_collisions)
    conf = [
        -0.17499999999999999, -3.194, 0.33200000000000002, -1.6579999999999999,
        1.431, 0.105
    ]
    assert collision_fn(conf, diagnosis=True)
    print('\n')

    print('#' * 10)
    cprint('Checking attachment to obstacles (w/o links) collision', 'green')
    collision_fn = get_collision_fn(
        robot,
        ik_joints,
        obstacles=[workspace, box_body],
        attachments=attachments,
        self_collisions=True,
        disabled_collisions=self_collision_links,
        extra_disabled_collisions=extra_disabled_collisions)
    conf = [
        -2.8100000000000001, -1.484, -1.9199999999999999, -1.6579999999999999,
        1.431, 0.105
    ]
    assert collision_fn(conf, diagnosis=True)
    print('\n')

    print('#' * 10)
    cprint('Checking attachment to multi-link obstacle collision', 'green')
    collision_fn = get_collision_fn(
        robot,
        ik_joints,
        obstacles=[workspace],
        attachments=attachments,
        self_collisions=True,
        disabled_collisions=self_collision_links,
        extra_disabled_collisions=extra_disabled_collisions)
    conf = [
        -0.17499999999999999, -2.4780000000000002, 0.33200000000000002,
        -1.6579999999999999, 1.431, 0.105
    ]
    assert collision_fn(conf, diagnosis=True)
    print('\n')

    # * collision checking exoneration
    print('#' * 10)
    print('self-link collision disabled')
    collision_fn = get_collision_fn(robot,
                                    ik_joints,
                                    obstacles=[],
                                    attachments=[],
                                    self_collisions=False)
    conf = [-1.029744, -1.623156, 2.844887, -0.977384, 1.58825, 0.314159]
    assert not collision_fn(conf, diagnosis=True)
    print('\n')

    print('#' * 10)
    cprint('robot links to obstacle collision exoneration', 'green')
    cprint(
        'In this example, the first collision function will check collision between the robot and the box, '
        + 'but the second collision function will ignore it.', 'yellow')
    collision_fn = get_collision_fn(
        robot,
        ik_joints,
        obstacles=[box_body],
        attachments=[],
        self_collisions=True,
        disabled_collisions=self_collision_links,
    )
    collision_fn_disable = get_collision_fn(
        robot,
        ik_joints,
        obstacles=[box_body],
        attachments=[],
        self_collisions=True,
        disabled_collisions=self_collision_links,
        extra_disabled_collisions=extra_disabled_collisions.union([
            ((robot, link_from_name(robot,
                                    'forearm_link')), (box_body, BASE_LINK))
        ]),
    )
    conf = [
        -3.2639999999999998, -2.6880000000000002, -0.85499999999999998, -1.536,
        3.0369999999999999, -0.070000000000000007
    ]
    assert collision_fn(conf, diagnosis=True)
    assert not collision_fn_disable(conf, diagnosis=True)
    print('\n')

    # * joint value overflow checking & exoneration
    cprint('joint value overflow checking & exoneration', 'green')
    cprint(
        'collision_fn checks the robot\'s joint limits as well. We can also exonerate them by passing custom_limits into get_collision_fn',
        'yellow')

    def get_custom_limits_from_name(robot, joint_limits):
        return {
            joint_from_name(robot, joint): limits
            for joint, limits in joint_limits.items()
        }

    custom_limits = get_custom_limits_from_name(robot, {
        'shoulder_pan_joint': (-7.9, 0),
        'elbow_joint': (-8.0, 0)
    })
    collision_fn = get_collision_fn(robot, ik_joints)
    collision_fn_disable = get_collision_fn(robot,
                                            ik_joints,
                                            custom_limits=custom_limits)
    conf = [
        -7.8450000000000002, -2.1469999999999998, -7.99, -0.92500000000000004,
        1.78, 0.105
    ]
    assert collision_fn(conf, diagnosis=True)
    assert not collision_fn_disable(conf, diagnosis=True)
    print('\n')
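The demo above leaves the pybullet client connected when it returns; if you want the script to terminate cleanly, a short epilogue such as this (using the same pybullet_planning helpers seen throughout) could be appended:

wait_for_user()   # keep the GUI responsive until the user confirms
disconnect()      # shut down the pybullet client started by connect()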
Example #9
 def tf_world_frame(self, base_to_pose, armname):
     # transform a pose expressed in the given arm's base-link frame into the world frame
     basename = self.arms_base[armname]
     baseid = pyplan.link_from_name(self.id, basename)
     world_to_base = pyplan.get_link_pose(self.id, baseid)
     world_to_pose = pyplan.multiply(world_to_base, base_to_pose)
     return world_to_pose
Example #10
 def tf_arm_frame(self, pose, armname):
     # transform a world-frame pose into the given arm's base-link frame (inverse of tf_world_frame)
     basename = self.arms_base[armname]
     baseid = pyplan.link_from_name(self.id, basename)
     base_to_world = pyplan.invert(pyplan.get_link_pose(self.id, baseid))
     base_to_pose = pyplan.multiply(base_to_world, pose)
     return base_to_pose
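A minimal sketch of how the two helpers relate; `robot` is a hypothetical instance of the enclosing class and 'left_arm' a hypothetical key of its arms_base dict:

base_to_pose = pyplan.Pose(point=(0.4, 0.0, 0.2))            # a pose in the arm's base-link frame
world_pose = robot.tf_world_frame(base_to_pose, 'left_arm')  # arm-base frame -> world frame
back_again = robot.tf_arm_frame(world_pose, 'left_arm')      # world frame -> arm-base frame
# back_again should match base_to_pose up to numerical precision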