else:
    raise RuntimeError ("Failed to apply constraint.")

if not robot.isConfigValid(q1proj):
    raise RuntimeError ("Projected config not valid.")

res = ps.applyConstraints (q2)
if res [0]:
    q2proj = res [1]
else:
    raise RuntimeError ("Failed to apply constraint.")

if not robot.isConfigValid(q2proj):
    raise RuntimeError ("Projected config not valid.")

ps.setInitialConfig (q1proj); ps.addGoalConfig (q2proj)
ps.loadObstacleFromUrdf ("iai_maps", "kitchen_area", "") # environment

# RRT-Connect: path of 36 s with 68 waypoints (2600 variables to optimize)
#ps.selectPathPlanner ("VisibilityPrmPlanner")

ps.solve ()
ps.optimizePath(0)


pp = PathPlayer (robot.client, r)

pp (0)
pp (1)

len(ps.getWaypoints (0))
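
# Sketch: compare the paths computed above, assuming path 0 is the planner
# output and path 1 the result of optimizePath, as played above with pp(0)/pp(1).
print("waypoints in path 0: " + str(len(ps.getWaypoints(0))))
print("length of path 0: " + str(ps.pathLength(0)))
print("length of path 1: " + str(ps.pathLength(1)))
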
q_goal [rank] = -0.5
rank = robot.rankInConfiguration ['r_shoulder_lift_joint']
q_goal [rank] = 0.5
rank = robot.rankInConfiguration ['r_elbow_flex_joint']
q_goal [rank] = -0.5
r (q_goal)

## CONSTRAINTS ##
# "torso_lift_joint" : do not constraint z axis because different for q_init and q_goal
# constraint torso to be at position [-3.25, -4.0, 0.790675] in workspace frame
#ps.createPositionConstraint ("posConstraint", "torso_lift_joint", "", [0,0,0], [-3.25, -4.0, 0.790675], [1,1,0])
#ps.setNumericalConstraints ("constraints", ["posConstraint"])
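
# Sketch, if the position constraint above is enabled: project the initial and
# goal configurations onto the constraint before planning (applyConstraints is
# used exactly as earlier in this script). Kept commented out, like the
# constraint definition itself.
#res = ps.applyConstraints (q_init)
#if res [0]:
#    q_init = res [1]
#else:
#    raise RuntimeError ("Failed to apply constraint.")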


ps.loadObstacleFromUrdf ("iai_maps", "kitchen_area", "") # environment
ps.setInitialConfig (q_init); ps.addGoalConfig (q_goal)

#ps.selectPathPlanner ("VisibilityPrmPlanner") 
begin=time.time()
ps.solve ()
end=time.time()
print "Solving time: "+str(end-begin)

ps.addPathOptimizer("GradientBased")
begin=time.time()
ps.optimizePath(0)
end=time.time()
print "Optim time: "+str(end-begin)
cl = robot.client
cl.problem.getIterationNumber ()
cl.problem.getComputationTime ()
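
# Sketch: report the planner statistics queried above.
print("iterations: " + str(cl.problem.getIterationNumber()))
print("computation time: " + str(cl.problem.getComputationTime()))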

# Example 3

res = ps.applyConstraints(q2)
if res[0]:
    q2proj = res[1]
else:
    raise RuntimeError("Failed to apply constraint.")

ps.selectPathValidation("Progressive", 0.025)

import datetime as dt
totalTime = dt.timedelta(0)
totalNumberNodes = 0
N = 20
for i in range(N):
    ps.client.problem.clearRoadmap()
    ps.resetGoalConfigs()
    ps.setInitialConfig(q1proj)
    ps.addGoalConfig(q2proj)
    t1 = dt.datetime.now()
    ps.solve()
    t2 = dt.datetime.now()
    totalTime += t2 - t1
    print(t2 - t1)
    n = len(ps.client.problem.nodes())
    totalNumberNodes += n
    print("Number nodes: " + str(n))

print("Average time: " +
      str((totalTime.seconds + 1e-6 * totalTime.microseconds) / float(N)))
print("Average number nodes: " + str(totalNumberNodes / float(N)))

from hpp.gepetto import ViewerFactory
vf = ViewerFactory (ps)
ps = ProblemSolver (robot)
cl = robot.client

# Configs : [x, y, z, q1, q2, q3, q4, dir.x, dir.y, dir.z, theta]
q11 = [6.2, 0.5, 0.5, 0, 0, 0, 1, 0, 0, 1, 0]; q22 = [-4.4, -1.5, 6.5, 0, 0, 0, 1, 0, 0, 1, 0]

from hpp.gepetto import Viewer, PathPlayer
r = Viewer (ps); gui = r.client.gui
pp = PathPlayer (robot.client, r)
r.loadObstacleModel ("animals_description","scene_jump_harder","scene_jump_harder")

q1 = cl.robot.projectOnObstacle (q11, 0.001); q2 = cl.robot.projectOnObstacle (q22, 0.001)
robot.isConfigValid(q1); robot.isConfigValid(q2)
r(q1)

ps.setInitialConfig (q1); ps.addGoalConfig (q2)

offsetOrientedPath = 2 # if the oriented-path computation is removed from the ProblemSolver, set this to 1 instead of 2

#plotFrame (r, 'frame_group', [0,0,0], 0.6)

# First parabolas: vmax = 7m/s,  mu = 1.2
cl.problem.setFrictionCoef(1.2); cl.problem.setMaxVelocityLim(7)
ps.clearRoadmap();
solveTime = ps.solve () # 299 nodes
pathId = ps.numberPaths()-offsetOrientedPath # path without orientation stuff

pathSamples = plotSampleSubPath (cl, r, pathId, 70, "path0", [0,0,1,1])
plotCone (q1, cl, r, "cone_first", "friction_cone_SG2"); plotCone (q2, cl, r, "cone_second", "friction_cone_SG2")
plotConeWaypoints (cl, pathId, r, "cone_wp_group", "friction_cone_WP2")
# Imports needed by this class; the rbprm- and affordance-specific names
# (createContext, loadServerPlugin, RbprmClient, AffordanceTool) are assumed to
# be provided by the enclosing script, as their module paths vary.
from abc import abstractmethod

from hpp.corbaserver import Client, ProblemSolver
from hpp.gepetto import PathPlayer, ViewerFactory


class AbstractPathPlanner:

    rbprmBuilder = None
    ps = None
    v = None
    afftool = None
    pp = None
    extra_dof_bounds = None
    robot_node_name = None  # name of the robot in the node list of the viewer

    def __init__(self, context=None):
        """
        Constructor
        :param context: An optional string that gives a name to a CORBA context instance
        """
        self.v_max = -1  # bounds on the linear velocity for the root, negative values mean unused
        self.a_max = -1  # bounds on the linear acceleration for the root, negative values mean unused
        self.root_translation_bounds = [0] * 6  # bounds on the root translation position (-x, +x, -y, +y, -z, +z)
        self.root_rotation_bounds = [-3.14, 3.14, -0.01, 0.01, -0.01,
                                     0.01]  # bounds on the rotation of the root (-z, z, -y, y, -x, x)
        # The rotation bounds are only used during random sampling; they are not enforced along the path
        self.extra_dof = 6  # number of extra configuration variables appended after the joint configuration: 6 to store the linear root velocity and acceleration
        self.mu = 0.5  # friction coefficient between the robot and the environment
        self.used_limbs = []  # names of the limbs that must be in contact during all the motion
        self.size_foot_x = 0  # size of the feet along the x axis
        self.size_foot_y = 0  # size of the feet along the y axis
        self.q_init = []
        self.q_goal = []
        self.context = context
        if context:
            createContext(context)
            loadServerPlugin(context, 'rbprm-corba.so')
            loadServerPlugin(context, 'affordance-corba.so')
            self.hpp_client = Client(context=context)
            self.hpp_client.problem.selectProblem(context)
            self.rbprm_client = RbprmClient(context=context)
        else:
            self.hpp_client = None
            self.rbprm_client = None

    @abstractmethod
    def load_rbprm(self):
        """
        Build an rbprmBuilder instance for the correct robot and initialize its extra config size
        """
        pass

    def set_configurations(self):
        self.rbprmBuilder.client.robot.setDimensionExtraConfigSpace(self.extra_dof)
        self.q_init = self.rbprmBuilder.getCurrentConfig()
        self.q_goal = self.rbprmBuilder.getCurrentConfig()
        self.q_init[2] = self.rbprmBuilder.ref_height
        self.q_goal[2] = self.rbprmBuilder.ref_height

    def compute_extra_config_bounds(self):
        """
        Compute extra dof bounds from the current values of v_max and a_max
        By default, set symmetrical bounds on the x and y axes and bound the z axis values to 0
        """
        # bounds for the extra DOF: by default use v_max/a_max on the x and y axes and 0 on the z axis
        self.extra_dof_bounds = [
            -self.v_max, self.v_max, -self.v_max, self.v_max, 0, 0, -self.a_max, self.a_max, -self.a_max, self.a_max,
            0, 0
        ]

    def set_joints_bounds(self):
        """
        Set the root translation and rotation bounds as well as the extra DOF bounds
        """
        self.rbprmBuilder.setJointBounds("root_joint", self.root_translation_bounds)
        self.rbprmBuilder.boundSO3(self.root_rotation_bounds)
        self.rbprmBuilder.client.robot.setExtraConfigSpaceBounds(self.extra_dof_bounds)

    def set_rom_filters(self):
        """
        Define which ROMs must be in collision at all times and with which kinds of affordances.
        By default, set all the ROMs in used_limbs to be in contact with 'Support' affordances
        """
        self.rbprmBuilder.setFilter(self.used_limbs)
        for limb in self.used_limbs:
            self.rbprmBuilder.setAffordanceFilter(limb, ['Support'])

    def init_problem(self):
        """
        Load the robot, set the bounds and the ROM filters, then create a
        ProblemSolver instance and set the default parameters.
        The values of v_max, a_max, mu, size_foot_x and size_foot_y must be defined before calling this method
        """
        self.load_rbprm()
        self.set_configurations()
        self.compute_extra_config_bounds()
        self.set_joints_bounds()
        self.set_rom_filters()
        self.ps = ProblemSolver(self.rbprmBuilder)
        # define parameters used by various methods :
        if self.v_max >= 0:
            self.ps.setParameter("Kinodynamic/velocityBound", self.v_max)
        if self.a_max >= 0:
            self.ps.setParameter("Kinodynamic/accelerationBound", self.a_max)
        if self.size_foot_x > 0:
            self.ps.setParameter("DynamicPlanner/sizeFootX", self.size_foot_x)
        if self.size_foot_y > 0:
            self.ps.setParameter("DynamicPlanner/sizeFootY", self.size_foot_y)
        self.ps.setParameter("DynamicPlanner/friction", 0.5)
        # sample only configuration with null velocity and acceleration :
        self.ps.setParameter("ConfigurationShooter/sampleExtraDOF", False)

    def init_viewer(self, env_name, env_package="hpp_environments", reduce_sizes=[0, 0, 0], visualize_affordances=[]):
        """
        Build an instance of hpp-gepetto-viewer from the current problemSolver
        :param env_name: name of the urdf describing the environment
        :param env_package: name of the package containing this urdf (default to hpp_environments)
        :param reduce_sizes: distances used to shrink the affordance planes toward their centers
        (to avoid placing contacts close to the edges of a surface)
        :param visualize_affordances: list of affordances type to visualize, default to none
        """
        vf = ViewerFactory(self.ps)
        if self.context:
            self.afftool = AffordanceTool(context=self.context)
            # FIXME: this should be called in the afftool constructor
            self.afftool.client.affordance.affordance.resetAffordanceConfig()
        else:
            self.afftool = AffordanceTool()
        self.afftool.setAffordanceConfig('Support', [0.5, 0.03, 0.00005])
        self.afftool.loadObstacleModel("package://" + env_package + "/urdf/" + env_name + ".urdf",
                                       "planning",
                                       vf,
                                       reduceSizes=reduce_sizes)

        self.v = vf.createViewer(ghost=True, displayArrows=True)
        self.pp = PathPlayer(self.v)
        for aff_type in visualize_affordances:
            self.afftool.visualiseAffordances(aff_type, self.v, self.v.color.lightBrown)

    def init_planner(self, kinodynamic=True, optimize=True):
        """
        Select the rbprm methods, and the kinodynamic ones if required
        :param kinodynamic: if True, also select the kinodynamic methods
        :param optimize: if True, add the RandomShortcut path optimizer (or RandomShortcutDynamic if kinodynamic is also True)
        """
        self.ps.selectConfigurationShooter("RbprmShooter")
        self.ps.selectPathValidation("RbprmPathValidation", 0.05)
        if kinodynamic:
            self.ps.selectSteeringMethod("RBPRMKinodynamic")
            self.ps.selectDistance("Kinodynamic")
            self.ps.selectPathPlanner("DynamicPlanner")
        if optimize:
            if kinodynamic:
                self.ps.addPathOptimizer("RandomShortcutDynamic")
            else:
                self.ps.addPathOptimizer("RandomShortcut")

    def solve(self, display_roadmap=False):
        """
        Solve the path planning problem.
        q_init and q_goal must have been defined before calling this method
        """
        if len(self.q_init) != self.rbprmBuilder.getConfigSize():
            raise ValueError("Initial configuration vector does not have the right size")
        if len(self.q_goal) != self.rbprmBuilder.getConfigSize():
            raise ValueError("Goal configuration vector does not have the right size")
        self.ps.setInitialConfig(self.q_init)
        self.ps.addGoalConfig(self.q_goal)
        self.v(self.q_init)
        if display_roadmap and self.v.client.gui.getNodeList() is not None:
            t = self.v.solveAndDisplay("roadmap", 5, 0.001)
        else:
            t = self.ps.solve()
        print("Guide planning time : ", t)

    def display_path(self, path_id=-1, dt=0.1):
        """
        Display the path in the viewer; if no path is specified, display the last one
        :param path_id: the id of the path to display, defaults to the most recent one
        :param dt: discretization step used to display the path (default to 0.1)
        """
        if self.pp is not None:
            if path_id < 0:
                path_id = self.ps.numberPaths() - 1
            self.pp.dt = dt
            self.pp.displayVelocityPath(path_id)

    def play_path(self, path_id=-1, dt=0.01):
        """
        Play the path in the viewer; if no path is specified, play the last one
        :param path_id: the id of the path to play, defaults to the most recent one
        :param dt: discretization step used to display the path (default to 0.01)
        """
        self.show_rom()
        if self.pp is not None:
            if path_id < 0:
                path_id = self.ps.numberPaths() - 1
            self.pp.dt = dt
            self.pp(path_id)

    def hide_rom(self):
        """
        Remove the current robot from the display
        """
        self.v.client.gui.setVisibility(self.robot_node_name, "OFF")

    def show_rom(self):
        """
        Add the current robot to the display
        """
        self.v.client.gui.setVisibility(self.robot_node_name, "ON")

    @abstractmethod
    def run(self):
        """
        Must be defined in the child class to run all the methods with the correct arguments.
        """
        # example of definition:
        """
        self.init_problem()
        # define initial and goal position
        self.q_init[:2] = [0, 0]
        self.q_goal[:2] = [1, 0]

        self.init_viewer("multicontact/ground", visualize_affordances=["Support"])
        self.init_planner()
        self.solve()
        self.display_path()
        self.play_path()
        """
        pass
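
# A minimal concrete planner, as a sketch only: "MyRobotBuilder" is a
# hypothetical rbprm builder class standing in for the robot-specific one, and
# the limb names, bounds and velocity limits below are placeholders.
class ExamplePathPlanner(AbstractPathPlanner):
    def load_rbprm(self):
        self.rbprmBuilder = MyRobotBuilder()  # hypothetical robot-specific builder
        self.robot_node_name = "my_robot"  # placeholder viewer node name
        self.used_limbs = ["rleg_rom", "lleg_rom"]  # placeholder ROM names
        self.v_max = 0.3
        self.a_max = 0.1
        self.root_translation_bounds = [-2, 2, -2, 2, 0.9, 1.1]

    def run(self):
        # follows the example given in the run() docstring above
        self.init_problem()
        self.q_init[:2] = [0, 0]
        self.q_goal[:2] = [1, 0]
        self.init_viewer("multicontact/ground", visualize_affordances=["Support"])
        self.init_planner()
        self.solve()
        self.display_path()
        self.play_path()
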
# Difficult init config
q1 = [1.45, 1.05, -0.8, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.8, 1.0, -1.0, -0.85, 0.0, -0.65, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -1.9, 0.0, -0.6, -0.3, 0.7, -0.4, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, 0.1, -0.15, -0.1, 0.3, -0.418879, 0.0, 0.0, 0.3, -0.8, 0.3, 0.0, 0.0]

q2 = [6.55, -2.91, 1.605, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2, 1.0, -0.4, -1.0, 0.0, -0.2, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -1.5, -0.2, 0.1, -0.3, 0.1, 0.1, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -0.2, 0.6, -0.453786, 0.872665, -0.418879, 0.2, -0.4, 0.0, -0.453786, 0.1, 0.7, 0.0]

q2hard = [7.60, -2.41, 0.545, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.8, 0.0, -0.4, -0.55, 0.0, -0.6, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -2.8, 0.0, 0.1, -0.2, -0.1, 0.4, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -0.2, 0.6, -0.1, 1.2, -0.4, 0.2, -0.3, 0.0, -0.4, 0.2, 0.7, 0.0]

robot.isConfigValid(q1)
robot.isConfigValid(q2)

# qf should be invalid
qf = [1, -3, 3, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2, 1.0, -0.4, -1.0, 0.0, -0.2, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -1.5, -0.2, 0.1, -0.3, 0.1, 0.1, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -0.2, 0.6, -0.453786, 0.872665, -0.418879, 0.2, -0.4, 0.0, -0.453786, 0.1, 0.7, 0.0]
robot.isConfigValid(qf)

ps.setInitialConfig (q1); ps.addGoalConfig (q2); ps.solve ()

ps.solve ()
ps.pathLength(0)

ps.addPathOptimizer('RandomShortcut')
ps.optimizePath (0)
ps.pathLength(1)

ps.clearPathOptimizers()
ps.addPathOptimizer("GradientBased")
ps.optimizePath (0)
ps.numberPaths()
ps.pathLength(ps.numberPaths()-1)

pp(ps.numberPaths()-1)
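
# Sketch: list the lengths of all paths produced above, to compare the raw
# solution with the RandomShortcut and GradientBased results.
for pid in range(ps.numberPaths()):
    print("path " + str(pid) + " length: " + str(ps.pathLength(pid)))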

# Example 8

q3 = [1.45, 1.05, -0.8, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.8, 1.0, -1.0, -0.85, 0.0, -0.65, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -1.9, 0.0, -0.6, -0.3, 0.7, -0.4, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, 0.1, -0.15, -0.1, 0.3, -0.418879, 0.0, 0.0, 0.3, -0.8, 0.3, 0.0, 0.0]

q2 = [6.55, -2.91, 1.605, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2, 1.0, -0.4, -1.0, 0.0, -0.2, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -1.5, -0.2, 0.1, -0.3, 0.1, 0.1, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -0.2, 0.6, -0.453786, 0.872665, -0.418879, 0.2, -0.4, 0.0, -0.453786, 0.1, 0.7, 0.0]

q4 = [7.60, -2.41, 0.545, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.8, 0.0, -0.4, -0.55, 0.0, -0.6, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -2.8, 0.0, 0.1, -0.2, -0.1, 0.4, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -0.2, 0.6, -0.1, 1.2, -0.4, 0.2, -0.3, 0.0, -0.4, 0.2, 0.7, 0.0]


# Load obstacles in HPP
cl.obstacle.loadObstacleModel('gravity_description','gravity_decor','') # do not move it (its position comes from the URDF)
cl.obstacle.loadObstacleModel('gravity_description','emu','') # loaded as an obstacle for now
position_emu= (0,-1,0.2,1,0,0,0)
cl.obstacle.moveObstacle ('emu_base', position_emu)


# Problem
ps.setInitialConfig (q3)
ps.addGoalConfig (q2) # q4 or q2
begin=time.time()
ps.solve ()
end=time.time()
print "Solving time: "+str(end-begin)


# Display spaceship environment and Emu guy
r.addObject('spaceShip','obstacle_base') # display
r.moveObject('spaceShip',(0,0,0,1,0,0,0))
r.addObject('emu','emu_base') # display
position_emu= (0,-1,0.2,1,0,0,0)
r.moveObject('emu',position_emu)
r(q3)
ps = ProblemSolver (robot)
cl = robot.client

# Configs : [x, y, z, qw, qx, qy, qz, nx, ny, nz, theta]
q1 = [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0]
q2 = [1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0]


from hpp.gepetto import Viewer, PathPlayer
r = Viewer (ps)
pp = PathPlayer (robot.client, r)
#r.loadObstacleModel ("animals_description","cave","cave")
#cl.obstacle.loadObstacleModel('animals_description','cave','cave')
r(q2)

ps.setInitialConfig (q1); ps.addGoalConfig (q2); ps.solve ()

# check orientation:
r(ps.getWaypoints (0)[0]) # ref
r(ps.getWaypoints (1)[0]) # should be good...

index = cl.robot.getConfigSize () - 4

q = q1[::]
plotStraightLine ([q[index], q[index+1], q[index+2]], q, r, "normale")
plotCone (q1, cl, r, 0.5, 0.2, "cone1")
plotCone (q2, cl, r, 0.5, 0.2, "cone2")

r( ps.configAtParam(0,2) )
ps.pathLength(0)
ps.getWaypoints (0)

# Load obstacles in HPP
cl.obstacle.loadObstacleModel('gravity_description','gravity_decor','') # do not move it (its position comes from the URDF)
cl.obstacle.loadObstacleModel('gravity_description','emu','') # loaded as an obstacle for now
position_emu= (0,-1,0.2,1,0,0,0)
cl.obstacle.moveObstacle ('emu_base', position_emu)

q1 = [-2, 0.01, 0.705, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.8, 0.8, -1.0, -1.0, 0.0, 0.2, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, 0.7, -0.8, 0.1, -0.523599, 0.1, -0.3, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, 0.6, 0.1, -0.453786, 0.872665, -0.418879, 0.2, -0.4, 0.0, -0.453786, 0.1, 0.7, 0.0]

q2 = [2, 0.01, 0.705, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.8, 0.8, -1.0, -1.0, 0.0, 0.2, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, 0.7, -0.8, 0.1, -0.523599, 0.1, -0.3, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, 0.6, 0.1, -0.453786, 0.872665, -0.418879, 0.2, -0.4, 0.0, -0.453786, 0.1, 0.7, 0.0]

qconf = [0, 0.01, 0.705, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.8, 0.8, -1.0, -1.0, 0.0, 0.2, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, 0.7, -0.8, 0.1, -0.523599, 0.1, -0.3, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, 0.6, 0.1, -0.453786, 0.872665, -0.418879, 0.2, -0.4, 0.0, -0.453786, 0.1, 0.7, 0.0]

# Problem
ps.setInitialConfig (q3)
ps.addGoalConfig (q2)
ps.solve () # 0-1
ps.setInitialConfig (q2)
ps.resetGoalConfigs ()
ps.addGoalConfig (q2to4)
ps.solve () # 2-3
ps.setInitialConfig (q2to4)
cl.problem.resetGoalConfigs ()
ps.addGoalConfig (q4)
ps.solve () # 4-5
pp(1) # play the solution

begin=time.time()
ps.solve ()
end=time.time()

# Example 11

from hpp.corbaserver.robot_2d import Robot
from hpp.corbaserver import ProblemSolver
from hpp.corbaserver import Client
import time
import sys
import matplotlib.pyplot as plt
sys.path.append('/local/mcampana/devel/hpp/src/test-hpp/script')

robot = Robot ('robot_2d')
ps = ProblemSolver (robot)
cl = robot.client
cl.obstacle.loadObstacleModel('robot_2d_description','cylinder_obstacle','')

# q = [x, y] # limits in URDF file
"""
q1 = [-2, 0]; q2 = [-0.2, 2]; q3 = [0.2, 2]; q4 = [2, 0]
ps.setInitialConfig (q1); ps.addGoalConfig (q2); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q2); ps.addGoalConfig (q3); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q3); ps.addGoalConfig (q4); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q1); ps.addGoalConfig (q4); ps.solve (); #3
"""

q1 = [-2, 0]; q2 = [-1, 1]; q3 = [-1.2, 1.8]; q4 = [-0.2, 1.2];  q5 = [0.5, 1.9]
q6 = [2, 1.5]; q7 = [1, 0.5]; q8 = [2, 0]
ps.setInitialConfig (q1); ps.addGoalConfig (q2); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q2); ps.addGoalConfig (q3); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q3); ps.addGoalConfig (q4); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q4); ps.addGoalConfig (q5); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q5); ps.addGoalConfig (q6); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q6); ps.addGoalConfig (q7); ps.solve (); ps.resetGoalConfigs ()

# Example 12

class Agent(Client):
    robot = None
    platform = None
    index = 0
    ps = None
    # we load other agents as ghosts to reduce the computation time while planning
    ghosts = []
    ghost_urdf = ''
    ghost_package = ''
    # to avoid confusion, we use start and end instead of init and goal
    start_config = []
    end_config = []
    current_config = []  # this is not used for now
    permitted_plan = []  # this is the plan generated
    repeat = 0  # to help the selectProblem function

    # an agent should have a robot and the start and end configurations
    # to avoid confusion, we use start_config instead of init_config and
    # end_config instead of goal_config
    def __init__(self, robot, start, end):
        Client.__init__(self)
        self.repeat = 0
        # print 'creating an agent of type ', robotType
        self.robot = robot
        self.start_config = start
        self.end_config = end
        self.current_config = self.start_config
        self.__plan_proposed = []

    # once all agents are generated, we may register the agents to a platform
    def registerPlatform(self, platform, index):
        self.platform = platform
        self.index = index

    # this function gives some information about the agent and robot it is managing
    def printInformation(self):
        print('-------------------------------------------')
        print('name of the robot:\t', self.robot.name)
        print('configuration size:\t', self.robot.getConfigSize())
        print('degree of freedom:\t', self.robot.getNumberDof())
        print('mass of the robot:\t', self.robot.getMass())
        print('the center of mass:\t', self.robot.getCenterOfMass())
        config = self.robot.getCurrentConfig()
        nm = self.robot.getJointNames()
        print('there are ', len(nm), 'joint names in total. They are:')
        for i in range(len(nm)):
            lower = self.robot.getJointBounds(nm[i])[0]
            upper = self.robot.getJointBounds(nm[i])[1]
            print('joint name: ', nm[i], '\trank in configuration:',
                  self.robot.rankInConfiguration[nm[i]], end='')
            print('\tlower bound: {0:.3f}'.format(lower),
                  '\tupper bound: {0:.3f}'.format(upper))

    # set up the environment
    def setEnvironment(self):
        if self.platform.env != None:
            self.ps.loadObstacleFromUrdf(self.platform.env.packageName,
                                         self.platform.env.urdfName,
                                         self.platform.env.name)
            # self.ps.moveObstacle('airbase_link_0', [0,0, -3, 1,0,0,0])

    # load the other agents to the problem solver
    def loadOtherAgents(self):
        # print 'There are ', len(self.platform.agents), 'agents'
        #load ghost agents
        for a in self.platform.agents:
            if (a.index != self.index):
                # if it is not itself then load a ghost agent
                g = Ghost()
                self.ps.loadObstacleFromUrdf(
                    g.packageName, g.urdfName,
                    a.robot.name)  # it's the robot's name!!!
                # and then place it at the initial location of the agent
                # print self.robot.name, ' is now loading ', a.robot.name, ' as a ghost'
                config = a.current_config
                spec = self.getMoveSpecification(config)
                spec[2] = 0.3
                self.obstacle.moveObstacle(a.robot.name + 'base_link_0', spec)

    # load agents from the node
    def loadOtherAgentsFromNode(self, node):
        print('There are ', len(self.platform.agents), 'agents')
        #load ghost agents
        for a in self.platform.agents:
            if (a.index != self.index):
                # if it is not itself then load a ghost agent
                g = Ghost()
                self.ps.loadObstacleFromUrdf(
                    g.packageName, g.urdfName,
                    a.robot.name)  # it's the robot's name!!!
                # and then place it at the initial location of the agent
                config = node.getAgentCurrentConfig(a.index)
                spec = self.getMoveSpecification(config)
                self.obstacle.moveObstacle(a.robot.name + 'base_link_0', spec)
                print(self.robot.name, ' is now loading ', a.robot.name,
                      ' as a ghost', 'it is at ', spec[0], spec[1])

    # note that the default solver does not consider the position of other agents
    def startDefaultSolver(self):
        self.repeat += 1
        name = self.robot.name
        self.problem.selectProblem(str(self.index) + ' ' + str(self.repeat))
        self.robot = HyQ(name)
        self.ps = ProblemSolver(self.robot)
        self.ps.setInitialConfig(self.start_config)
        self.ps.addGoalConfig(self.end_config)
        self.ps.selectPathPlanner("VisibilityPrmPlanner")
        self.ps.addPathOptimizer("RandomShortcut")

    # initialise a solver from a node; the node contains information about the other agents and itself
    # this method is used when proposing plans while interacting with the platform for MAS path planning
    def startNodeSolver(self, node):
        self.repeat += 1
        name = self.robot.name
        self.problem.selectProblem(str(self.index) + ' ' + str(self.repeat))
        self.robot = HyQ(name)
        self.ps = ProblemSolver(self.robot)
        cfg = node.getAgentCurrentConfig(self.index)
        print('this iteration, the agent', name, 'starts from ', cfg[0], cfg[1])
        self.ps.setInitialConfig(cfg)
        self.ps.addGoalConfig(self.end_config)
        self.ps.selectPathPlanner("VisibilityPrmPlanner")
        self.ps.addPathOptimizer("RandomShortcut")

    # this is only used when the agent takes too long (30 seconds) while planning
    def terminate_solving(self):
        self.problem.interruptPathPlanning()

    # the solve method for problem solver but with a time bound
    def solve(self):
        # try catch -------------------
        try:
            t = Timer(30.0, self.terminate_solving)
            t.start()
            print('solved: ', self.ps.solve())
            t.cancel()
        except Error as e:
            print(e.msg)
            print('***************\nfailed to plan within limited time\n**************')
            return -1

        # self.repeat += 1

    # store the path for two reasons:
    # 1. store as a default plan
    # 2. to continue the path
    def storePath(self, choice=0, segments=8):
        # always store the first one for now
        self.__plan_proposed = []
        for p in range(int(round(segments * self.ps.pathLength(choice)))):
            self.__plan_proposed.append(
                self.ps.configAtParam(choice, p * 1.0 / segments))

        # the last configuration is the goal configuration
        if self.ps.configAtParam(
                choice, self.ps.pathLength(choice)) == self.end_config:
            self.__plan_proposed.append(self.end_config)
        print('stored; plan length: ', len(self.__plan_proposed))

    # this is hard coded for now for the airplane example; we should add an
    # entry about it to the environment class.
    def setBounds(self):
        self.robot.setJointBounds("base_joint_xy", [-35, 10, -2.6, 4.3])

    # the rest are helper functions
    def getConfigOfProposedPlanAtTime(self, index):
        return self.__plan_proposed[index]

    def getConfigOfPermittedPlanAtTime(self, index):
        return self.permitted_plan[index]

    def getProposedPlanLength(self):
        return len(self.__plan_proposed)

    def setPermittedPlan(self, plan):
        self.permitted_plan = plan

    def getPermittedPlanLength(self):
        return len(self.permitted_plan)

    # export the permitted plan to a specific file in the format
    # 	agent X
    # 	config 1
    # 	config 2
    # 	etc
    def exportPermittedPlan(self, filename):
        f = open(filename, 'a+')
        f.write('agent ' + str(self.index) + '\n')
        for p in self.permitted_plan:
            f.write(str(p)[1:-1] + '\n')
        f.close()
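
    # Illustrative only: with the format above, an exported file looks like
    # the following (configuration values are made up for the example):
    #   agent 0
    #   -2.0, 0.0, 1.0, 0.0
    #   -1.9, 0.05, 1.0, 0.0
    #   ...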

    # for the sake of manipulation, we return a copy of it
    def obtainPermittedPlan(self):
        return copy.copy(self.permitted_plan)

    # we will get only a copy of it, not the original one;
    # to highlight the difference, we use 'obtain' instead of 'get'
    def obtainProposedPlan(self):
        # return a copy, because the stored value sometimes appeared to change otherwise
        return copy.copy(self.__plan_proposed)

    # to transfer the specification from 2D to 3D
    def getMoveSpecification(self, config):
        x = config[0]
        y = config[1]
        th = atan2(config[3], config[2])
        # print 'sin = ', self.init_config[3], ' cos = ', self.init_config[2], ' th = ', th
        return [x, y, 0, cos(th / 2), 0, 0, sin(th / 2)]
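
    # Worked example (illustrative): a planar config with x=1.0, y=2.0 and
    # theta = pi/2 (i.e. config[2] = cos(theta) = 0, config[3] = sin(theta) = 1)
    # maps to [1.0, 2.0, 0, cos(pi/4), 0, 0, sin(pi/4)], a rotation of pi/2
    # about the z axis written as the quaternion (w, x, y, z).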

    # the function to compute a plan, exceptions are not handled in this simple demo
    def computePlan(self, node):
        self.startNodeSolver(node)
        self.setBounds()
        self.setEnvironment()
        self.loadOtherAgentsFromNode(node)
        if self.solve() != -1:
            self.storePath()
        else:
            self.__plan_proposed = self.__plan_proposed[node.progress_time::]
            [node.getAgentCurrentConfig(self.index)]
            print('take the previous one and continue searching')
            return -1
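
# Minimal usage sketch for the Agent class above (illustrative; it assumes a
# HyQ robot instance, start/end configurations and a platform object built by
# the surrounding scripts, which are not shown here):
agent = Agent(robot, start_config, end_config)
agent.registerPlatform(platform, 0)
agent.startDefaultSolver()
agent.setBounds()
agent.setEnvironment()
agent.loadOtherAgents()
if agent.solve() != -1:
    agent.storePath()
    agent.setPermittedPlan(agent.obtainProposedPlan())
    agent.exportPermittedPlan('plans.txt')
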
from hpp.corbaserver import Client
import time

robot = Robot ('puzzle_robot') # object5
robot.setJointBounds('base_joint_xyz', [-0.1, 0.1, -0.1, 0.1, -0.1, 0.1])

ps = ProblemSolver (robot)
r = Viewer (ps)
cl = robot.client
pp = PathPlayer (cl, r)

# Patchwork of path
q1 = [0, 0, 0, 1, 0, 0, 0]
q2 = [0, 0, 0, 0.707106781, 0, 0, 0.707106781]
# equivalent to : [0, 0, 0, -0.707106781, 0, 0, -0.707106781] q7
ps.setInitialConfig (q1); ps.addGoalConfig (q2)
ps.solve (); ps.resetGoalConfigs ()

q3 = [0, 0, 0, 0, 0, 0, 1]
# equivalent to : [0, 0, 0, 0, 0, 0, -1] q8
ps.setInitialConfig (q2); ps.addGoalConfig (q3)
ps.solve (); ps.resetGoalConfigs ()

q4 = [0, 0, 0, -0.707106781, 0, 0, 0.707106781]
# equivalent to : [0, 0, 0, 0.707106781, 0, 0, -0.707106781] q9
ps.setInitialConfig (q3); ps.addGoalConfig (q4)
ps.solve (); ps.resetGoalConfigs ()

q5 = [0, 0, 0, -1, 0, 0, 0]
# equivalent to : [0, 0, 0, 1, 0, 0, 0] q1, q10
ps.setInitialConfig (q4); ps.addGoalConfig (q5)

# Example 14

from hpp.corbaserver.robot_2d import Robot
from hpp.corbaserver import ProblemSolver
from hpp.corbaserver import Client
import time
import sys
import matplotlib.pyplot as plt
sys.path.append('/local/mcampana/devel/hpp/src/test-hpp/script')

robot = Robot ('robot_2d')
ps = ProblemSolver (robot)
cl = robot.client
cl.obstacle.loadObstacleModel('robot_2d_description','cylinder_obstacle','')

# q = [x, y] # limits in URDF file
q1 = [-2, 0]; q2 = [-0.2, 2]; q3 = [0.2, 2]; q4 = [2, 0]
ps.setInitialConfig (q1); ps.addGoalConfig (q2); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q2); ps.addGoalConfig (q3); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q3); ps.addGoalConfig (q4); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q1); ps.addGoalConfig (q4); ps.solve (); ps.resetGoalConfigs ()
# pp(3) = p0 final

#ps.addPathOptimizer("GradientBased")
#ps.addPathOptimizer("Prune")
ps.addPathOptimizer("PartialRandomShortcut")
ps.optimizePath(3) # pp(4) = p1 final

ps.pathLength(3)
ps.pathLength(4)
ps.getWaypoints (3)
ps.getWaypoints (4)
# should be [-0.07 0] [0.07 0] if alpha_init=1

# Example 15

robot = Robot('ur5')
ps = ProblemSolver(robot)

vf = ViewerFactory(ps)

vf.loadObstacleModel("hpp_practicals", "ur_benchmark/obstacles", "obstacles")
vf.loadObstacleModel("hpp_practicals", "ur_benchmark/table", "table")
vf.loadObstacleModel("hpp_practicals", "ur_benchmark/wall", "wall")

r = vf.createViewer()

q1 = [0, -1.57, 1.57, 0, 0, 0]
q2 = [0.2, -1.57, -1.8, 0, 0.8, 0]
q3 = [1.57, -1.57, -1.8, 0, 0.8, 0]

r(q2)
r(q1)

ps.setInitialConfig(q2)
ps.addGoalConfig(q1)

from motion_planner import MotionPlanner
m = MotionPlanner(robot, ps)
pathId = m.solveBiRRT(maxIter=1000)
ps.solve()
pp = PathPlayer(robot.client, r)
pid = ps.numberPaths() - 1
if pid < 0: raise RuntimeError("No path in vector")

pp(pid)
sys.path.append('/local/mcampana/devel/hpp/src/test-hpp/script')

kRange = 5
robot = Robot ('simple_robot')
robot.setJointBounds('base_joint_xy', [-kRange, kRange, -kRange, kRange])
ps = ProblemSolver (robot)
cl = robot.client
Viewer.withFloor = True
r = Viewer (ps)
pp = PathPlayer (cl, r)

# q = [x, y, z, theta] # (z not considered since planar)
q1 = [-4, 4, 1, 0]; q2 = [4, -4, 1, 0] # obstS 1
#q1 = [2.4, -4.6, 1.0, 0.0]; q2 = [-0.4, 4.6, 1.0, 0.0] # obstS 2

ps.setInitialConfig (q1)
ps.addGoalConfig (q2)

# Load box obstacle in HPP for collision avoidance #
#cl.obstacle.loadObstacleModel('potential_description','cylinder_obstacle','')
cl.obstacle.loadObstacleModel('potential_description','obstacles_concaves','')
r.loadObstacleModel ("potential_description","obstacles_concaves","obstacles_concaves") # in viewer !

ps.createOrientationConstraint ("orConstraint", "base_joint_rz", "",
                                [0.7071067812, 0, 0, 0.7071067812], [0,0,1]) # OK
T = [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,1]]
ps.setNumericalConstraints ("constraints", ["orConstraint"])

ps.solve ()
begin=time.time()
cl.problem.optimizePath(0)
r(q_init)

q_goal[0:2] = [-3.2, -4]
rank = robot.rankInConfiguration['l_shoulder_lift_joint']
q_goal[rank] = 0.5
rank = robot.rankInConfiguration['l_elbow_flex_joint']
q_goal[rank] = -0.5
rank = robot.rankInConfiguration['r_shoulder_lift_joint']
q_goal[rank] = 0.5
rank = robot.rankInConfiguration['r_elbow_flex_joint']
q_goal[rank] = -0.5
r(q_goal)

ps.loadObstacleFromUrdf("iai_maps", "kitchen_area", "")

ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)
ps.selectPathPlanner("PRM")
#ps.solve ()

ps.prepareSolveStepByStep()
for _ in range(10):
    ps.executeOneStep()
robot.setJointBounds('j_translation_y', [-2, 4])

# q = [x, y] # limits in URDF file
q1 = [-7, 3]
q2 = [-8, 2]
q3 = [-6, 2]
q4 = [-7, 1]
q5 = [-5, -1]
q6 = [-8, -1]
q7 = [-4, 1]
q8 = [-2, -1]
q9 = [0.2, 1.6]
q10 = [1, -1]
q11 = [4, 1]
q12 = [5, -1]
ps.setInitialConfig(q1)
ps.addGoalConfig(q2)
ps.solve()
ps.resetGoalConfigs()
ps.setInitialConfig(q2)
ps.addGoalConfig(q3)
ps.solve()
ps.resetGoalConfigs()
ps.setInitialConfig(q3)
ps.addGoalConfig(q4)
ps.solve()
ps.resetGoalConfigs()
ps.setInitialConfig(q4)
ps.addGoalConfig(q5)
ps.solve()
ps.resetGoalConfigs()
# Configs : [x, y, z, q1, q2, q3, q4, dir.x, dir.y, dir.z, theta]
q11 = [-3.8, 2.4, 1.2, 0, 0, 0, 1, 0, 0, 1, 0]; q22 = [3.5, -3.3, 0.4, 0, 0, 0, 1, 0, 0, 1, 0]
#cl.obstacle.loadObstacleModel('animals_description','envir3d_window_mesh','')

from hpp.gepetto import Viewer, PathPlayer
r = Viewer (ps)
pp = PathPlayer (robot.client, r)
r.loadObstacleModel ("animals_description","envir3d_window_mesh","envir3d_window_mesh")
addLight (r, [-3,3,4,1,0,0,0], "li"); addLight (r, [3,-3,4,1,0,0,0], "li1")
r(q11)

q1 = cl.robot.projectOnObstacle (q11, 0.001); q2 = cl.robot.projectOnObstacle (q22, 0.001)
robot.isConfigValid(q1); robot.isConfigValid(q2)
r(q1)

ps.setInitialConfig (q1); ps.addGoalConfig (q2)

plotSphere (q2, cl, r, "sphere_q2", [0,1,0,1], 0.02) # same as robot
nPointsPlot = 50
offsetOrientedPath = 2 # if the oriented-path computation is removed from the ProblemSolver, set this to 1 instead of 2
plotFrame (r, "_", [0,0,2.8], 0.5)

# First parabola(s): vmax = 6.8m/s,  mu = 1.2
cl.problem.setFrictionCoef(1.2); cl.problem.setMaxVelocityLim(6.8)
plotCone (q1, cl, r, "cone2", "friction_cone2"); plotCone (q2, cl, r, "cone22", "friction_cone2")
ps.clearRoadmap();
solveTime = ps.solve () # 0.085 s
pathId = ps.numberPaths()-offsetOrientedPath # path without orientation stuff
samples = plotSampleSubPath (cl, r, pathId, nPointsPlot, "path0", [0,0,1,1])
plotConeWaypoints (cl, pathId, r, "wp0", "friction_cone2")
plotSpheresWaypoints (cl, pathId, r, "sphere_wp0", [0,0,1,1], 0.02)
from hpp.corbaserver.practicals.ur5 import Robot
from hpp.corbaserver import ProblemSolver
from hpp.gepetto import ViewerFactory, PathPlayer

robot = Robot ('ur5')
ps = ProblemSolver (robot)

vf = ViewerFactory (ps)

vf.loadObstacleModel ("hpp_practicals","ur_benchmark/obstacles","obstacles")
vf.loadObstacleModel ("hpp_practicals","ur_benchmark/table","table")
vf.loadObstacleModel ("hpp_practicals","ur_benchmark/wall","wall")

v = vf.createViewer ()

q1 = [0, -1.57, 1.57, 0, 0, 0]; q2 = [0.2, -1.57, -1.8, 0, 0.8, 0]
q3 = [1.57, -1.57, -1.8, 0, 0.8, 0]

v (q2)
v (q3)

ps.setInitialConfig (q2)
ps.addGoalConfig (q3)

from motion_planner import MotionPlanner
m = MotionPlanner (robot, ps)
pathId = m.solveBiRRT (maxIter = 1000)

pp = PathPlayer (v)
#pp (pathId)
#original from antonio
q2=[0,-2.5,0.64870180180254433111, 0.7101853756232854, 0, 0, -0.7040147244559684, 0,0,0,0,0.26179900000000000393, 0.1745299999999999907,0,-0.52359900000000003661,0,0,0.1745319999999999927, -0.1745319999999999927,0.1745319999999999927,-0.1745319999999999927, 0.1745319999999999927,-0.1745319999999999927,0.26179900000000000393, -0.1745299999999999907,0,-0.52359900000000003661,0,0,0.1745319999999999927, -0.1745319999999999927,0.1745319999999999927,-0.1745319999999999927, 0.1745319999999999927,-0.1745319999999999927,0,0,-0.45378600000000002268, 0.87266500000000002402,-0.41887900000000000134,0,0,0,-0.45378600000000002268, 0.87266500000000002402,-0.41887900000000000134,0]
#q2=[0,-2.5,0.64870180180254433111, 1,0,0,0, 0,0,0,0,0.26179900000000000393, 0.1745299999999999907,0,-0.52359900000000003661,0,0,0.1745319999999999927, -0.1745319999999999927,0.1745319999999999927,-0.1745319999999999927, 0.1745319999999999927,-0.1745319999999999927,0.26179900000000000393, -0.1745299999999999907,0,-0.52359900000000003661,0,0,0.1745319999999999927, -0.1745319999999999927,0.1745319999999999927,-0.1745319999999999927, 0.1745319999999999927,-0.1745319999999999927,0,0,-0.45378600000000002268, 0.87266500000000002402,-0.41887900000000000134,0,0,0,-0.45378600000000002268, 0.87266500000000002402,-0.41887900000000000134,0]

publisher = ScenePublisher(robot)
publisher(q2)
time.sleep(1)
publisher(q1)

## Add constraints
solver = ProblemSolver (robot)
print "diffusing planner (general RRT implementation)"
solver.selectPathPlanner("DiffusingPlanner")
solver.loadObstacleFromUrdf("hpp_ros","wall")

solver.setInitialConfig (q1)
solver.addGoalConfig (q2)

#pc.invokeIrreduciblePlanner()
#mpc = MPClient()
#pc = mpc.precomputation
#cnames = pc.getNumericalConstraints (robot.getInitialConfig())
#solver.setNumericalConstraints ("stability-constraints", cnames)
## add obstacles
print "solve"
start_time = float(time.time())
solver.solve ()
end_time = float(time.time())
seconds = end_time - start_time
hours = seconds/3600
print("--- %s seconds ---" % seconds)
robot.setJointBounds('base_joint_xyz', [xStone-2, xStone+2, yEmu-1, yEmu+2, zEmu-0.5, zEmu+0.5])
# List of configs
q1 = [xStone+2, yEmu+0, zEmu, 1.0, 0.0, 0.0, 0.0]
q2 = [xStone+1.5, yEmu+0.8, zEmu, 0.707106781, 0, 0, 0.707106781]
q3 = [xStone+1, yEmu+1.4, zEmu, 0, 0, 0, 1]
q4 = [xStone+0.5, yEmu+1.7, zEmu, -0.707106781, 0, 0, 0.707106781]
#q5 = [xStone+0, yEmu+2, zEmu, -1, 0, 0, 0]
q5 = [xStone+0, yEmu+2, zEmu, -0.99, 0.14003571, 0.013, 0.011]
q6 = [xStone-0.5, yEmu+1.7, zEmu, -0.707106781, 0, 0, -0.707106781]
q7 = [xStone-1, yEmu+1.4, zEmu, 0, 0, 0, -1]
q8 = [xStone-1.5, yEmu+0.8, zEmu, 0.707106781, 0, 0, -0.707106781]
q9 = [xStone-2, yEmu+0, zEmu, 1, 0, 0, 0]
r(q1)
robot.isConfigValid(q1)

ps.setInitialConfig (q1); ps.addGoalConfig (q2); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q2); ps.addGoalConfig (q3); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q3); ps.addGoalConfig (q4); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q4); ps.addGoalConfig (q5); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q5); ps.addGoalConfig (q6); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q6); ps.addGoalConfig (q7); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q7); ps.addGoalConfig (q8); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q8); ps.addGoalConfig (q9); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q1); ps.addGoalConfig (q9); ps.solve (); ps.resetGoalConfigs ()

nInitialPath = ps.numberPaths()-1 #8
ps.pathLength(nInitialPath)

#ps.addPathOptimizer('RandomShortcut') #9
#ps.optimizePath (nInitialPath)
#ps.pathLength(ps.numberPaths()-1)
qt = [0, 0, 0, math.sqrt (1/3.), math.sqrt (1/3.), 0.0, math.sqrt (1/3.)]
qt2 = [0, 0, 0, math.sqrt (1 - 0.95**2), 0.95, 0.0, 0.0]
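
# Sanity check (sketch): the quaternion part of a free-flyer configuration
# must have unit norm; for qt above, 3 * (sqrt(1/3.))**2 == 1.
print(sum(c ** 2 for c in qt[3:7]))  # should print ~1.0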

Q.append (qt)
Q.append (qt2)
Q.append ( [0, 0, 0, 0.99, 0.14003571, 0.013, 0.011]) # to remove

r(Q[0])
for i in range(0, len(Q)):
    r(Q[i])
    time.sleep (0.5)

#robot.isConfigValid(Q[0])

for i in range(0, len(Q)-1):
    ps.setInitialConfig (Q[i]); ps.addGoalConfig (Q[i+1]); ps.solve (); ps.resetGoalConfigs ()

ps.setInitialConfig (Q[0]); ps.addGoalConfig (Q[len(Q)-1]); ps.solve ();



nInitialPath = ps.numberPaths()-1 #8
ps.pathLength(nInitialPath)

#ps.addPathOptimizer('RandomShortcut') #9
#ps.optimizePath (nInitialPath)
#ps.pathLength(ps.numberPaths()-1)

#ps.clearPathOptimizers()
ps.addPathOptimizer("GradientBased")
ps.optimizePath (nInitialPath)
import sys
import matplotlib.pyplot as plt
sys.path.append('/local/mcampana/devel/hpp/src/test-hpp/script')

kRange = 5
robot = Robot('potential')
robot.setJointBounds('base_joint_xy', [-kRange, kRange, -kRange, kRange])
ps = ProblemSolver(robot)
cl = robot.client

# q = [x, y, z, theta] # (z not considered since planar)
q1 = [-4, 4, 1, 0]
q2 = [4, -4, 1, 0]  # obstS 1
#q1 = [-4, 4, 0]; q2 = [4, -4, 0] # obstS 1

ps.setInitialConfig(q1)
ps.addGoalConfig(q2)
cl.obstacle.loadObstacleModel('potential_description', 'obstacles_concaves',
                              'obstacles_concaves')

#ps.createOrientationConstraint ("orConstraint", "base_joint_rz", "", [1,0,0,0], [0,0,1])
#ps.setNumericalConstraints ("constraints", ["orConstraint"])

ps.selectPathPlanner("VisibilityPrmPlanner")
#ps.selectPathValidation ("Dichotomy", 0.)

ps.solve()
ps.pathLength(0)

ps.addPathOptimizer("GradientBased")
ps.optimizePath(0)
q_init = robot.getCurrentConfig ()
q_goal = q_init [::]
q_init [0:2] = [-3.2, -4]
rank = robot.rankInConfiguration ['torso_lift_joint']
q_init [rank] = 0.2
r (q_init)

q_goal [0:2] = [-3.2, -4]
rank = robot.rankInConfiguration ['l_shoulder_lift_joint']
q_goal [rank] = 0.5
rank = robot.rankInConfiguration ['l_elbow_flex_joint']
q_goal [rank] = -0.5
rank = robot.rankInConfiguration ['r_shoulder_lift_joint']
q_goal [rank] = 0.5
rank = robot.rankInConfiguration ['r_elbow_flex_joint']
q_goal [rank] = -0.5
r (q_goal)

ps.loadObstacleFromUrdf ("iai_maps", "kitchen_area", "")

ps.setInitialConfig (q_init)
ps.addGoalConfig (q_goal)
ps.selectPathPlanner ("PRM")
ps.solve ()

from hpp_ros import PathPlayer
pp = PathPlayer (robot.client, r)

pp (0)
pp (1)

# Example 26

#cl.obstacle.loadObstacleModel('robot_2d_description','environment_2d','')
#cl.obstacle.loadObstacleModel('robot_2d_description','environment_2d_harder','')

from hpp.gepetto import Viewer, PathPlayer
r = Viewer (ps)
pp = PathPlayer (cl, r)
#r.loadObstacleModel ("robot_2d_description","environment_2d","environment_2d")
r.loadObstacleModel ("robot_2d_description","environment_2d_harder","environment_2d_harder")
#r.loadObstacleModel ("iai_maps", "labyrinth", "labyrinth")
#r.loadObstacleModel ("iai_maps", "labyrinth2", "labyrinth2") 

# q = [x, y, dir.x, dir.y] # limits in URDF file
#q1 = [-1.3, 0.2]; q2 = [0.2, 0.2]; q3 = [5, -2.8]; q4 = [9, 2.2]; q5 = [12.4, -5.8];
#q1 = [-1.3, 0.2, 0]; q2 = [0.2, 0.2, 0]; q3 = [5, -2.8, 0]; q4 = [9, 2.2, 0]; q5 = [12.4, -5.8, 0];
q1 = [-1.3, 0.2, 0, 1];  q5 = [12.4, -5.8, 0, 1];
ps.setInitialConfig (q1); ps.addGoalConfig (q5); ps.solve ()


# with sub-paths
q1 = [-1.3, 0.2, 0, 1]; q2 = [0.2, 0.2, 0, 1]; q3 = [5, -2.8, 0, 1]; q4 = [9, 2.2, 0, 1];
ps.setInitialConfig (q1); ps.addGoalConfig (q2); ps.solve () # pp(0)
ps.resetGoalConfigs ()
ps.setInitialConfig (q2); ps.addGoalConfig (q3); ps.solve () # pp(1)
ps.resetGoalConfigs ()
ps.setInitialConfig (q3); ps.addGoalConfig (q4); ps.solve () # pp(2)
ps.resetGoalConfigs ()
ps.setInitialConfig (q4); ps.addGoalConfig (q5); ps.solve () # pp(3)
ps.resetGoalConfigs ()
ps.setInitialConfig (q1); ps.addGoalConfig (q5); ps.solve () # pp(4)
ps.resetGoalConfigs ()