import demos_common_imports
import time
import numpy as np

from pyrieef.geometry.workspace import EnvBox
from pyrieef.motion.trajectory import linear_interpolation_trajectory
from pyrieef.motion.trajectory import no_motion_trajectory
from pyrieef.motion.objective import MotionOptimization2DCostMap
from pyrieef.optimization import algorithms
from pyrieef.rendering.optimization import TrajectoryOptimizationViewer

# ----------------------------------------------------------------------------
print("Run Motion Optimization")

# Straight-line initial guess between start and goal over 22 timesteps.
q_start = -.22 * np.ones(2)
q_end = .3 * np.ones(2)
trajectory = linear_interpolation_trajectory(
    q_init=q_start,
    q_goal=q_end,
    T=22)

# -----------------------------------------------------------------------------
# The objective function is wrapped in the optimization viewer object,
# which draws the trajectory as it is being optimized.
env = EnvBox(origin=np.array([0, 0]), dim=np.array([1., 1.]))
objective_traj = MotionOptimization2DCostMap(
    box=env,
    T=trajectory.T(),
    q_init=trajectory.initial_configuration(),
    q_goal=trajectory.final_configuration())

# Weight the cost terms: no initial potential, a very stiff terminal
# potential, and a mild acceleration (smoothness) penalty.
objective_traj.set_scalars(
    obstacle_scalar=1.,
    init_potential_scalar=0.,
    term_potential_scalar=10000000.,
    acceleration_scalar=2.)
# ============================= Example #2 ====================================
from pyrieef.motion.trajectory import linear_interpolation_trajectory
from pyrieef.motion.freeflyer import FreeflyerObjective
from pyrieef.motion.cost_terms import *
from pyrieef.kinematics.robot import *
from pyrieef.optimization import algorithms
from pyrieef.rendering.optimization import TrajectoryOptimizationViewer

# Rendering backend: None, pyglet2d, pyglet3d or matplotlib.
DRAW_MODE = "pyglet2d"
ALPHA = 10.
MARGIN = .20
OFFSET = 0.1

print("Run Motion Optimization")

# Robot geometry loaded from file, scaled down for the unit workspace.
robot = create_robot_from_file(scale=.02)

# Straight-line seed trajectory in (x, y, theta) over 22 timesteps.
trajectory = linear_interpolation_trajectory(
    q_init=np.array([-.2, -.2, 0]),
    q_goal=np.array([.3, .3, 0]),
    T=22)

# The objective function is wrapped in the optimization viewer object,
# which draws the trajectory as it is being optimized.
objective = FreeflyerObjective(
    T=trajectory.T(),
    n=trajectory.n(),
    q_init=trajectory.initial_configuration(),
    q_goal=trajectory.final_configuration(),
    robot=robot)

# Workspace with two circular obstacles; the obstacle potential is
# derived from the workspace's signed distance field.
workspace = Workspace()
for center in (np.array([0.2, .15]), np.array([-.1, .15])):
    workspace.obstacles.append(Circle(center, .1))
sdf = SignedDistanceWorkspaceMap(workspace)
phi = ObstaclePotential2D(signed_distance_field=sdf, scaling=100., alpha=1.e-2)
from pyrieef.motion.freeflyer import FreeflyerObjective
from pyrieef.motion.cost_terms import *
from pyrieef.kinematics.robot import *
from pyrieef.optimization import algorithms
from pyrieef.rendering.optimization import TrajectoryOptimizationViewer

# Rendering backend: None, pyglet2d, pyglet3d or matplotlib.
DRAW_MODE = "pyglet2d"
ALPHA = 10.
MARGIN = .20
OFFSET = 0.1

print("Run Motion Optimization")

# Freeflyer geometry loaded from file, scaled down for the unit workspace.
robot = create_freeflyer_from_file(scale=.02)

# Straight vertical seed trajectory in (x, y, theta) over 22 timesteps.
start = np.array([.0, -.4, 0])
goal = np.array([.0, .4, 0])
trajectory = linear_interpolation_trajectory(q_init=start, q_goal=goal, T=22)

# The objective function is wrapped in the optimization viewer object,
# which draws the trajectory as it is being optimized.
objective = FreeflyerObjective(
    T=trajectory.T(),
    n=trajectory.n(),
    q_init=trajectory.initial_configuration(),
    q_goal=trajectory.final_configuration(),
    robot=robot)

# Workspace with a single thin box obstacle between start and goal.
workspace = Workspace()
obstacle = Box(origin=np.array([-.3, 0]), dim=np.array([.4, .02]))
workspace.obstacles.append(obstacle)
# ============================= Example #4 ====================================
 def geometric_replan(self):
     '''
     Plan a partial geometric trajectory up to the next symbolic change.

     If a symbolic plan is already committed (``self.plan``), it is
     re-validated and a trajectory for its next move action is optimized.
     Otherwise every candidate plan in ``self.plans`` is turned into an
     objective, the candidates are ranked by objective cost, and the
     first candidate whose trajectory optimization succeeds is chosen.

     Returns:
         bool: True if a trajectory was produced (or no move is needed),
         False if no symbolically and geometrically feasible plan remains
         at this trigger.
     '''
     if not self.plans and self.plan is None:
         HumoroLGP.logger.warn(
             'Symbolic plan is empty. Cannot plan geometric trajectory!')
         return False
     # Nothing to plan geometrically if the next action is not a move.
     if not self._check_move():
         return True
     # clear previous paths
     robot = self.workspace.get_robot_link_obj()
     robot.paths.clear()
     self.objectives.clear()
     self.geometric_elapsed_t = 0
     # prepare workspace
     self.place_human()
     workspace = self.workspace.get_pyrieef_ws()
     if self.plan is not None:  # current symbolic plan is still feasible, continue with this plan
         if not self.check_plan():
             self.plan = None  # remove current symbolic plan
             self.symbolic_elapsed_t = 0  # reset symbolic elapsed time
             HumoroLGP.logger.warn(
                 f'Current symbolic plan becomes symbolically infeasible at time {self.lgp_t}. Trying replanning at next trigger.'
             )
             return False
         # Next move action `a` with its duration/time budget `t`.
         a, t = self._get_next_move(self.plan)
         location = a.parameters[0]
         current = self.workspace.get_robot_geometric_state()
         goal_manifold = self.workspace.kin_tree.nodes[location]['limit']
         # Pick the goal point according to the trajectory-init scheme:
         # 'nearest' projects onto the goal manifold, 'outer' uses a
         # precomputed landmark for the location.
         if self.traj_init == 'nearest':
             goal = get_closest_point_on_circle(current, goal_manifold)
         elif self.traj_init == 'outer':
             goal = self.landmarks[location]
         else:
             HumoroLGP.logger.error(
                 f'Traj init scheme {self.traj_init} not support!')
             raise ValueError()
         if self.full_replan:
             # Re-optimize the whole remaining plan through all its
             # waypoints at once.
             waypoints, waypoint_manifolds = self.get_waypoints()
             trajectory = linear_interpolation_waypoints_trajectory(
                 waypoints)
             objective = TrajectoryConstraintObjective(
                 dt=1 / self.fps, enable_viewer=self.enable_viewer)
             objective.set_problem(workspace=workspace,
                                   trajectory=trajectory,
                                   waypoint_manifolds=waypoint_manifolds,
                                   goal_manifold=waypoint_manifolds[-1][0])
         else:
             # Only plan the next segment, seeded with a straight line.
             trajectory = linear_interpolation_trajectory(current, goal, t)
             objective = TrajectoryConstraintObjective(
                 dt=1 / self.fps, enable_viewer=self.enable_viewer)
             objective.set_problem(workspace=workspace,
                                   trajectory=trajectory,
                                   goal_manifold=goal_manifold)
         if self.enable_viewer:
             # Run the optimizer in a child process so the viewer can
             # animate in this one; the result comes back through shared
             # memory (a success flag and a flat trajectory buffer).
             self.viewer.initialize_viewer(objective, objective.trajectory)
             status = Value(c_bool, True)
             traj = Array(c_double, objective.n * (objective.T + 2))
             p = Process(target=objective.optimize, args=(status, traj))
             p.start()
             self.viewer.run()
             p.join()
             success = status.value
             # NOTE(review): the first two buffer entries are taken as
             # q_init — assumes objective.n == 2 here; confirm.
             traj = Trajectory(q_init=np.array(traj[:2]),
                               x=np.array(traj[2:]))
         else:
             success, traj = objective.optimize()
         if success:
             robot.paths.append(traj)
             return True
         else:
             self.plan = None  # remove current symbolic plan
             self.symbolic_elapsed_t = 0  # reset symbolic elapsed time
             HumoroLGP.logger.warn(
                 f'Current symbolic plan becomes geometrically infeasible at time {self.lgp_t}. Trying replanning at next trigger.'
             )
             return False
     else:
         # No committed plan yet: build one objective per candidate plan,
         # rank candidates by objective cost, then optimize in that order
         # until one succeeds.
         self.ranking = []
         self.chosen_plan_id = None
         for i, plan in enumerate(self.plans):
             a, t = self._get_next_move(plan)
             location = a.parameters[0]
             current = self.workspace.get_robot_geometric_state()
             goal_manifold = self.workspace.kin_tree.nodes[location][
                 'limit']
             if self.traj_init == 'nearest':
                 goal = get_closest_point_on_circle(current, goal_manifold)
             elif self.traj_init == 'outer':
                 goal = self.landmarks[location]
             else:
                 HumoroLGP.logger.error(
                     f'Traj init scheme {self.traj_init} not support!')
                 raise ValueError()
             if self.full_replan:
                 waypoints, waypoint_manifolds = self.get_waypoints(plan)
                 trajectory = linear_interpolation_waypoints_trajectory(
                     waypoints)
                 objective = TrajectoryConstraintObjective(
                     dt=1 / self.fps, enable_viewer=self.enable_viewer)
                 objective.set_problem(
                     workspace=workspace,
                     trajectory=trajectory,
                     waypoint_manifolds=waypoint_manifolds,
                     goal_manifold=waypoint_manifolds[-1][0])
             else:
                 trajectory = linear_interpolation_trajectory(
                     current, goal, t)
                 objective = TrajectoryConstraintObjective(
                     dt=1 / self.fps, enable_viewer=self.enable_viewer)
                 objective.set_problem(workspace=workspace,
                                       trajectory=trajectory,
                                       goal_manifold=goal_manifold)
             self.objectives.append(objective)
             # (cost, candidate index) pairs for ranking below.
             self.ranking.append((objective.cost(), i))
         # rank the plans
         self.ranking.sort(key=operator.itemgetter(0))
         # optimize the objective according to self.ranking
         for r in self.ranking:
             if self.enable_viewer:
                 # Same child-process + shared-memory scheme as above.
                 self.viewer.initialize_viewer(
                     self.objectives[r[1]],
                     self.objectives[r[1]].trajectory)
                 status = Value(c_bool, True)
                 traj = Array(
                     c_double, self.objectives[r[1]].n *
                     (self.objectives[r[1]].T + 2))
                 p = Process(target=self.objectives[r[1]].optimize,
                             args=(status, traj))
                 p.start()
                 self.viewer.run()
                 p.join()
                 success = status.value
                 traj = Trajectory(q_init=np.array(traj[:2]),
                                   x=np.array(traj[2:]))
             else:
                 success, traj = self.objectives[r[1]].optimize()
             if success:  # choose this plan
                 self.plan = self.plans[r[1]]
                 self.chosen_plan_id = r[1]
                 if self.verbose:
                     for a in self.plan[1]:
                         HumoroLGP.logger.info(a.name + ' ' +
                                               ' '.join(a.parameters))
                 robot.paths.append(traj)
                 return True
         HumoroLGP.logger.warn(
             f'All replan geometrical optimization infeasible at current time {self.lgp_t}. Trying replanning at next trigger.'
         )
         return False