Code example #1
0
def draw_line_rviz(traj, z_offset=-0.21):
    """Publish the trajectory *traj* as a sequence of pose markers in rviz.

    Args:
        traj: array-like of shape (N, >=3); columns 0-2 are read as the
            x, y, z positions of each point (assumed — TODO confirm with
            the caller).
        z_offset: vertical correction added to every z value. Defaults to
            the previously hard-coded -0.21 offset, so existing callers
            are unaffected.
    """
    marker_pub = rospy.Publisher("/visualization_marker", Marker, queue_size=1000)
    # Give the freshly created publisher time to register with rviz
    # before the first message is sent, otherwise early markers are lost.
    rospy.sleep(0.5)
    for idx in range(traj.shape[0]):
        traj_pose = Pose()
        traj_pose.position.x = traj[idx, 0]
        traj_pose.position.y = traj[idx, 1]
        traj_pose.position.z = traj[idx, 2] + z_offset
        # Fixed tool orientation (quaternion) reused for every point.
        traj_pose.orientation.x = 0.000016822
        traj_pose.orientation.y = 0.9993395783
        traj_pose.orientation.z = - 0.0362892576
        traj_pose.orientation.w = 0.0018699794
        # Fade the markers in: alpha ramps from 0.5 to 1.0 along the path.
        alpha = float(idx) / traj.shape[0] * 0.5 + 0.5
        # NOTE(review): 0.5 * idx exceeds 1.0 for idx > 2; rviz clamps
        # color channels, so only the first few markers differ in color —
        # confirm whether a normalized gradient was intended.
        rgba_tuple = ((0.5 * idx), 0.5, (0.5 * idx), alpha)
        util.send_traj_point_marker(marker_pub=marker_pub, pose=traj_pose, id=idx, rgba_tuple=rgba_tuple)
Code example #2
0
def main():
    """Visualize a smoothed hand-marker trajectory in rviz, then build a
    straight-line approach toward ``button_pose`` and plan a Cartesian
    path through it with MoveIt.

    NOTE(review): ``button_pose``, ``button_pose_trans``, ``z_axis_vec``,
    ``group`` and ``robot`` are not defined in this snippet — presumably
    module-level globals; verify before running.
    """
    rospy.init_node('test', anonymous=True)
    csv_path = '/home/shuangda/ros/kuka_ws/src/birl_pydmps/demo/hand_marker.csv'
    data_csv = pd.read_csv(csv_path)
    # Columns 24:27 are taken as the marker x/y/z positions
    # (assumed — TODO confirm against the CSV schema).
    raw_data = data_csv.values[:, 24:27].astype(float)
    # Smooth each coordinate independently along the sample axis.
    filtered_data = gaussian_filter1d(raw_data.T, sigma=5).T
    marker_pub = rospy.Publisher("/visualization_marker",
                                 Marker,
                                 queue_size=100)
    step_amount = 15
    for idx, point in enumerate(filtered_data):
        now_pose = Pose()
        now_pose.position.x = point[0]
        now_pose.position.y = point[1]
        now_pose.position.z = point[2]

        # visualize the pose in rviz using marker
        alpha = float(idx) / step_amount * 0.5 + 0.5
        rgba_tuple = (0, 0.5, 0.5, alpha)
        # BUG FIX: the original passed id=count, but ``count`` is only
        # bound in the next loop (NameError here) — use the loop index.
        # Also removed a leftover ipdb.set_trace() debugging breakpoint
        # that halted execution on every iteration.
        util.send_traj_point_marker(marker_pub=marker_pub,
                                    pose=now_pose,
                                    id=idx,
                                    rgba_tuple=rgba_tuple)
    # Build a linear approach: step_amount+1 poses marching along the
    # button's z axis toward button_pose in step_length increments.
    step_amount = 30
    step_length = 0.01
    list_of_poses = []
    for count in range(0, step_amount + 1):
        step_apart = step_amount - count
        now_pose = copy.deepcopy(button_pose)
        now_translation = button_pose_trans - step_apart * step_length * z_axis_vec
        now_pose.position.x = now_translation[0]
        now_pose.position.y = now_translation[1]
        now_pose.position.z = now_translation[2]

        # visualize the pose in rviz using marker
        alpha = float(count) / step_amount * 0.5 + 0.5
        rgba_tuple = (0, 0.5, 0.5, alpha)
        util.send_traj_point_marker(marker_pub=marker_pub,
                                    pose=now_pose,
                                    id=count,
                                    rgba_tuple=rgba_tuple)

        rospy.loginfo("add one pose")
        list_of_poses.append(now_pose)

    rospy.loginfo("============ Generating plan")
    # Slow the robot right down (1% of max) while testing.
    group.set_max_acceleration_scaling_factor(0.01)
    group.set_max_velocity_scaling_factor(0.01)
    (plan, fraction) = group.compute_cartesian_path(
        list_of_poses,  # waypoints to follow
        0.01,  # eef_step
        0.0)  # jump_threshold

    display_trajectory = moveit_msgs.msg.DisplayTrajectory()
    display_trajectory.trajectory_start = robot.get_current_state()
Code example #4
0
def main():
    """Resample a demonstrated 7-DOF pose trajectory (position + quaternion)
    from CSV, train a discrete DMP on it, roll the DMP out, visualize the
    resulting waypoints in rviz, and — after user confirmation — execute
    the planned Cartesian path on the robot.

    Reads the module-level globals ``data_path``, ``train_len``, ``tau``
    and ``limb_name`` (defined elsewhere in this file).
    """
    global data_path, train_len, tau, limb_name

    moveit_commander.roscpp_initialize(sys.argv)

    rospy.init_node('grab_object_moveit')

    robot = moveit_commander.RobotCommander()
    scene = moveit_commander.PlanningSceneInterface()
    group = moveit_commander.MoveGroupCommander("left_arm")

    # Publisher used to preview the planned trajectory in rviz.
    display_trajectory_publisher = rospy.Publisher(
        '/move_group/display_planned_path',
        moveit_msgs.msg.DisplayTrajectory,
        queue_size=20)

    marker_pub = rospy.Publisher("/visualization_marker",
                                 Marker,
                                 queue_size=100)

    ############## get data from DMP
    # limb = baxter_interface.Limb(limb_name)
    train_set = pd.read_csv(data_path)  # using pandas read data
    # Resample the demonstration onto train_len evenly spaced time stamps.
    # Column 0 is time; columns 1-7 are x, y, z, qx, qy, qz, qw
    # (assumed from the per-column interpolation below — TODO confirm
    # against the CSV layout).
    resample_t = np.linspace(train_set.values[0, 0], train_set.values[-1, 0],
                             train_len)  # resampling the time

    postion_x = np.interp(resample_t, train_set.values[:, 0],
                          train_set.values[:, 1])
    postion_y = np.interp(resample_t, train_set.values[:, 0],
                          train_set.values[:, 2])
    postion_z = np.interp(resample_t, train_set.values[:, 0],
                          train_set.values[:, 3])
    orientation_x = np.interp(resample_t, train_set.values[:, 0],
                              train_set.values[:, 4])
    orientation_y = np.interp(resample_t, train_set.values[:, 0],
                              train_set.values[:, 5])
    orientation_z = np.interp(resample_t, train_set.values[:, 0],
                              train_set.values[:, 6])
    orientation_w = np.interp(resample_t, train_set.values[:, 0],
                              train_set.values[:, 7])

    # Row i of traj holds the full 7-D pose at resampled step i. The
    # shared-sublist initialization is harmless because every row is
    # reassigned (not mutated) in the loop below.
    traj = [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * train_len
    for i in range(train_len):
        traj[i] = [
            postion_x[i], postion_y[i], postion_z[i], orientation_x[i],
            orientation_y[i], orientation_z[i], orientation_w[i]
        ]

    # Shape the data as a batch of one (train_len, 7) demonstration for
    # the DMP trainer.
    train_set_T = np.array([
        np.array([
            postion_x, postion_y, postion_z, orientation_x, orientation_y,
            orientation_z, orientation_w
        ]).T
    ])
    param_w, base_function = train(train_set_T)
    # object_vision = get_base_camera_pose(flag=True)
    # end_pose = limb.endpoint_pose()
    # get current angles
    # endpoint_pose = [end_pose['position'].x,
    #                  end_pose['position'].y,
    #                  end_pose['position'].z,
    #                  end_pose['orientation'].x,
    #                  end_pose['orientation'].y,
    #                  end_pose['orientation'].z,
    #                  end_pose['orientation'].w]

    # DMP start/goal are taken from the demonstration's own endpoints
    # (the commented-out alternatives used live robot/vision data).
    start_point = traj[0]
    #    start_point = endpoint_pose
    #    print start_point
    #    ending_point = object_vision
    ending_point = traj[-1]
    #    print ending_point
    # One DMP dimension per pose component (3 position + 4 quaternion).
    dmp = pydmps.dmp_discrete.DMPs_discrete(n_dmps=7, n_bfs=500, w=param_w)
    for i in range(7):
        dmp.y0[i] = start_point[i]  # set the initial state
        dmp.goal[i] = ending_point[i]  # set the ending goal

    # tau scales the rollout's temporal duration.
    y_track, dy_track, ddy_track = dmp.rollout(tau=tau)
    ############## get data from DMP
    #    rospy.loginfo("move_to_start_pose... ")
    #    move_to_start_pose(start_pose = y_track[0],limb_name = limb_name)
    #    rospy.loginfo("Done... ")

    #    step_length = 0.01
    # Convert each rolled-out 7-D sample into a geometry_msgs Pose,
    # publish it as an rviz marker, and collect it as a waypoint.
    list_of_poses = []
    #    list_of_poses.append(group.get_current_pose().pose)
    for idx in range(train_len):
        traj_pose = Pose()
        traj_pose.position.x = y_track[idx, 0]
        traj_pose.position.y = y_track[idx, 1]
        traj_pose.position.z = y_track[idx, 2]
        traj_pose.orientation.x = y_track[idx, 3]
        traj_pose.orientation.y = y_track[idx, 4]
        traj_pose.orientation.z = y_track[idx, 5]
        traj_pose.orientation.w = y_track[idx, 6]
        # visualize the pose in rviz using marker
        # Alpha ramps 0.5 -> 1.0 along the path so later points stand out.
        alpha = float(idx) / train_len * 0.5 + 0.5
        # NOTE(review): 0.5 * idx exceeds 1.0 for idx > 2; rviz clamps
        # color channels, so most markers end up the same color — confirm
        # whether a normalized gradient was intended.
        rgba_tuple = ((0.5 * idx), 0.5, (0.5 * idx), alpha)
        util.send_traj_point_marker(marker_pub=marker_pub,
                                    pose=traj_pose,
                                    id=idx,
                                    rgba_tuple=rgba_tuple)
        rospy.loginfo("add one pose")
        # deepcopy so each appended waypoint is an independent message.
        list_of_poses.append(copy.deepcopy(traj_pose))
#    ipdb.set_trace()
    rospy.loginfo("============ Generating plan")
    # Slow the robot right down (1% of max) while testing.
    group.set_max_acceleration_scaling_factor(0.01)
    group.set_max_velocity_scaling_factor(0.01)
    (plan, fraction) = group.compute_cartesian_path(
        list_of_poses,  # waypoints to follow
        0.01,  # eef_step
        0.0)  # jump_threshold
    print "============ Waiting while RVIZ displays plan.."

    #    group.plan()
    #    ipdb.set_trace()
    # Preview the plan in rviz, starting from the robot's current state.
    display_trajectory = moveit_msgs.msg.DisplayTrajectory()
    display_trajectory.trajectory_start = robot.get_current_state()
    display_trajectory.trajectory.append(plan)
    rospy.loginfo("============ Visulaize plan")
    display_trajectory_publisher.publish(display_trajectory)
    rospy.sleep(10)
    #    ipdb.set_trace()

    # Require explicit operator confirmation before moving the real robot.
    rospy.loginfo(
        "============ Enter \"ok\" to execute plan, \"no\" to cancel execute plan "
    )
    s = raw_input()
    if s == 'ok':
        rospy.loginfo("============ Gonna execute plan")
        group.execute(plan)
        rospy.loginfo("============ Done")
    if s == 'no':
        rospy.loginfo("============ already cancel")