def visualize_movement(start, path):
    """Publish a DisplayTrajectory message so RViz can animate the plan.

    :param start: start state; converted to a RobotState for the display;
    :param path: iterable of (state, _) pairs forming the planned motion;
    """
    # Publisher consumed by RViz's trajectory display plugin.
    publisher = rospy.Publisher(
        '/move_group/display_planned_path', DisplayTrajectory,
        queue_size=100)

    # Give the freshly created publisher a moment to register with
    # subscribers before publishing, otherwise the message may be dropped.
    sleep(0.5)

    display = DisplayTrajectory()
    display.trajectory_start = convertStateToRobotState(start)

    # One JointTrajectoryPoint per state along the path.
    joint_trajectory = JointTrajectory()
    joint_trajectory.joint_names = get_joint_names('right')
    trajectory_points = []
    for state, _ in path:
        waypoint = JointTrajectoryPoint()
        waypoint.positions = convertStateToRobotState(state).joint_state.position
        trajectory_points.append(waypoint)
    joint_trajectory.points = trajectory_points

    robot_trajectory = RobotTrajectory()
    robot_trajectory.joint_trajectory = joint_trajectory

    display.trajectory.append(robot_trajectory)
    publisher.publish(display)
def _encode_to_latent(model, device, config):
    """Encode a single c-space configuration into the VAE latent space.

    :param model: VAE exposing encode() and reparameterize();
    :param device: torch device the model lives on;
    :param config: 1-D array-like, a single configuration;
    :return: 1-D numpy latent vector (always on CPU).
    """
    with torch.no_grad():
        batch = torch.from_numpy(
            np.expand_dims(config, axis=0)).float().to(device)
        mu, logvar = model.encode(batch)
        z = model.reparameterize(mu, logvar)
        # .cpu() before .numpy(): Tensor.numpy() raises when the model
        # runs on a GPU device (the decode branch below already did this).
        return z.cpu().numpy()[0, :]


def generate_edges(model, device, state_validity_checker, fk_client,
                   include_pos):
    """Build one edge two ways and label every waypoint's collision status.

    Samples a pair of collision-free endpoints, then interpolates between
    them (a) in the VAE latent space, decoding each sample back to c-space,
    and (b) directly in c-space as a baseline.

    :param model: trained VAE with encode/reparameterize/decode;
    :param device: torch device for the model;
    :param state_validity_checker: service wrapper with getStateValidity();
    :param fk_client: forward-kinematics client passed to endpoint sampling;
    :param include_pos: forwarded to get_cfree_endpoints;
    :return: (decoded_path, cspace_path), each a list of
        (configuration, is_valid) pairs.
    """
    max_segment_factor = 0.01

    endpoints = get_cfree_endpoints(state_validity_checker, fk_client,
                                    include_pos)
    start, end = endpoints[0], endpoints[1]

    start_latent = _encode_to_latent(model, device, start)
    end_latent = _encode_to_latent(model, device, end)

    # Interpolate in the latent space; the latent prior is a standard
    # Gaussian, hence zeros/ones for the truncated-Gaussian segment bound.
    latent_max_segment = set_max_segment_truncated_gaussian(
        np.zeros(d_output), np.ones(d_output), max_segment_factor)
    latent_path = linear_interpolation(start_latent, end_latent,
                                       latent_max_segment)

    # Decode each latent sample back to c-space and record its validity.
    decoded_path = []
    with torch.no_grad():  # hoisted: no gradients needed anywhere below
        for pos in latent_path:
            point = torch.from_numpy(pos).float().to(device)  # latent sample
            sample = model.decode(point).cpu().double().numpy()  # c-space
            robot_state = convertStateToRobotState(sample[:7])
            is_valid = state_validity_checker.getStateValidity(
                robot_state).valid  # collision status in c-space
            decoded_path.append((sample, is_valid))

    # Baseline: straight-line interpolation directly in c-space.
    # list(...) is required under Python 3: np.array(dict.values()) on a
    # dict view yields a 0-d object array, not the joint-limit matrix.
    cspace_max_segment = set_max_segment_cspace(
        np.array(list(JOINT_LIMITS.values())), max_segment_factor)
    cspace_path = []
    start_cspace = np.array(start)[:7]
    end_cspace = np.array(end)[:7]
    for pos in linear_interpolation(start_cspace, end_cspace,
                                    cspace_max_segment):
        robot_state = convertStateToRobotState(pos)
        is_valid = state_validity_checker.getStateValidity(
            robot_state).valid  # collision status in c-space
        cspace_path.append((pos, is_valid))
    return decoded_path, cspace_path
def plan(samplerIndex, start, goal, ss, space, display_trajectory_publisher):
    """Solve a motion-planning query with FMT and visualize any solution.

    :param samplerIndex: 1 enables the learned (VAE) self-collision-free
        state sampler; any other value keeps the default sampler;
    :param start: OMPL start state;
    :param goal: OMPL goal state;
    :param ss: OMPL SimpleSetup for the problem;
    :param space: OMPL state space (sampler allocator is installed on it);
    :param display_trajectory_publisher: ROS publisher for RViz display;
    :return: (elapsed_time, solved) where solved is 1.0 on success else 0.0.
    """
    space_info = ss.getSpaceInformation()

    use_learned_sampler = samplerIndex == 1
    if use_learned_sampler:
        # Obstacle-based sampling through the learned generator.
        space.setStateSamplerAllocator(
            ob.StateSamplerAllocator(allocSelfCollisionFreeStateSampler))

    ss.setStartAndGoalStates(start, goal)

    # Configure an FMT planner for the defined space.
    fmt_planner = og.FMT(space_info)
    if use_learned_sampler:
        # This flag turns on sampling with the VAE generator.
        fmt_planner.setVAEFMT(1)
    fmt_planner.setExtendedFMT(False)  # do not extend if it can't terminate
    fmt_planner.setNumSamples(100)
    fmt_planner.setNearestK(False)  # radius-based neighbors, not K-nearest
    fmt_planner.setCacheCC(True)
    fmt_planner.setHeuristics(True)
    ss.setPlanner(fmt_planner)

    start_time = time.time()
    solved = ss.solve(40.0)  # planning-time budget in seconds
    elapsed_time = time.time() - start_time

    if not solved:
        print("No solution found after %s seconds: " % elapsed_time)
    else:
        print("Found solution after %s seconds:" % elapsed_time)
        path = ss.getSolutionPath()

        # Publish the solution so RViz can animate it.
        display_trajectory = DisplayTrajectory()
        display_trajectory.trajectory_start = convertStateToRobotState(start)
        display_trajectory.trajectory.append(convertPlanToTrajectory(path))
        display_trajectory_publisher.publish(display_trajectory)
        print("Visualizing trajectory...")
        sleep(0.5)

    return elapsed_time, float(int(solved))
def convertPlanToTrajectory(path):
    """Convert an OMPL path into a MoveIt trajectory usable for display.

    :param path: an OMPL path (anything exposing getStates());
    :return: a RobotTrajectory message for MoveIt.
    """
    def _as_point(state):
        # One JointTrajectoryPoint per OMPL state along the path.
        point = JointTrajectoryPoint()
        point.positions = convertStateToRobotState(state).joint_state.position
        return point

    joint_trajectory = JointTrajectory()
    joint_trajectory.joint_names = get_joint_names('right')
    joint_trajectory.points = [_as_point(s) for s in path.getStates()]

    trajectory = RobotTrajectory()
    trajectory.joint_trajectory = joint_trajectory
    return trajectory
# Example #5
# 0
 def isValid(self, state):
     """Return True when *state* passes the collision/validity check.

     Converts the (presumably OMPL) state to a MoveIt RobotState and
     queries the project's state-validity service.
     """
     # ToDo: Check the number of joints available in states;
     robot_state = convertStateToRobotState(state)
     return self.state_validity_checker.getStateValidity(robot_state).valid