q_init[3:7] = [0,0,0,1]
q_init [0:3] = [0.16, 0.25, 1.14]
v (q_init)
ps.setInitialConfig (q_init)
rbprmBuilder.setCurrentConfig (q_init)
# set goal config
q_goal = q_init [::]
q_goal[0] = 1.09
v(q_goal)


ps.addGoalConfig (q_goal)

# Choosing RBPRM shooter and path validation methods.
ps.selectConfigurationShooter("RbprmShooter")
ps.addPathOptimizer ("RandomShortcutDynamic")
ps.selectPathValidation("RbprmPathValidation",0.05)
# Choosing kinodynamic methods :
ps.selectSteeringMethod("RBPRMKinodynamic")
ps.selectDistance("Kinodynamic")
ps.selectPathPlanner("DynamicPlanner")
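# Illustrative sketch (not in the original snippet): the kinodynamic steering
# method and DynamicPlanner read their bounds from problem parameters, as done
# later in this file by AbstractPathPlanner.init_problem(). The numeric values
# below are placeholders.
v_max = 0.3  # placeholder linear velocity bound [m/s]
a_max = 0.1  # placeholder linear acceleration bound [m/s^2]
ps.setParameter("Kinodynamic/velocityBound", v_max)
ps.setParameter("Kinodynamic/accelerationBound", a_max)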

# Solve the planning problem :
success = ps.client.problem.prepareSolveStepByStep()
ps.client.problem.finishSolveStepByStep()

# display solution : 
from hpp.gepetto import PathPlayer
pp = PathPlayer (v)
pp.dt=0.1
#pp.displayVelocityPath(0)
# Q: list of intermediate configurations (defined elsewhere in the original script)
for i in range(0, len(Q)-1):
    ps.setInitialConfig (Q[i]); ps.addGoalConfig (Q[i+1]); ps.solve (); ps.resetGoalConfigs ()

ps.setInitialConfig (Q[0]); ps.addGoalConfig (Q[len(Q)-1]); ps.solve ();



nInitialPath = ps.numberPaths()-1 #8
ps.pathLength(nInitialPath)

#ps.addPathOptimizer('RandomShortcut') #9
#ps.optimizePath (nInitialPath)
#ps.pathLength(ps.numberPaths()-1)

#ps.clearPathOptimizers()
ps.addPathOptimizer("GradientBased")
ps.optimizePath (nInitialPath)
ps.numberPaths()
ps.pathLength(ps.numberPaths()-1)

pp(ps.numberPaths()-1)


ps.configAtParam(2,0.5)
v(ps.configAtParam(0,2))
ps.getWaypoints (0)
ps.getWaypoints (ps.numberPaths()-1)

# plot paths
import numpy as np
dt = 0.1
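# Illustrative completion of the plotting stub above (assumption: matplotlib is
# available and the last stored path is the one of interest).
import matplotlib.pyplot as plt

pid = ps.numberPaths() - 1
t_samples = np.arange(0., ps.pathLength(pid), dt)
xy = np.array([ps.configAtParam(pid, t)[0:2] for t in t_samples])
plt.plot(xy[:, 0], xy[:, 1], '-o', markersize=2)
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.title('root trajectory of path %d' % pid)
plt.show()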
Example #3
q_init[0:2] = [-3.2, -4]
rank = robot.rankInConfiguration['torso_lift_joint']
q_init[rank] = 0.2
r(q_init)

q_goal[0:2] = [-3.2, -4]
rank = robot.rankInConfiguration['l_shoulder_lift_joint']
q_goal[rank] = 0.5
rank = robot.rankInConfiguration['l_elbow_flex_joint']
q_goal[rank] = -0.5
rank = robot.rankInConfiguration['r_shoulder_lift_joint']
q_goal[rank] = 0.5
rank = robot.rankInConfiguration['r_elbow_flex_joint']
q_goal[rank] = -0.5
r(q_goal)

r.loadObstacleModel("iai_maps", "kitchen_area", "kitchen")

ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)
ps.selectPathPlanner("PRM")
ps.addPathOptimizer("RandomShortcut")

# ps.solve ()

# from hpp.gepetto import PathPlayer
# pp = PathPlayer (robot.client, r)

# pp (0)
# pp (1)
# Imports assumed from the original scenario module:
from abc import abstractmethod

from hpp.corbaserver import ProblemSolver
from hpp.corbaserver.affordance.affordance import AffordanceTool
from hpp.gepetto import PathPlayer, ViewerFactory


class AbstractPathPlanner:

    rbprmBuilder = None
    ps = None
    v = None
    afftool = None
    pp = None
    extra_dof_bounds = None
    robot_node_name = None  # name of the robot in the node list of the viewer

    def __init__(self):
        self.v_max = -1  # bounds on the linear velocity for the root, negative values mean unused
        self.a_max = -1  # bounds on the linear acceleration for the root, negative values mean unused
        self.root_translation_bounds = [
            0
        ] * 6  # bounds on the root translation position (-x, +x, -y, +y, -z, +z)
        self.root_rotation_bounds = [
            -3.14, 3.14, -0.01, 0.01, -0.01, 0.01
        ]  # bounds on the rotation of the root (-z, z, -y, y, -x, x)
        # The rotation bounds are only used during the random sampling, they are not enforced along the path
        self.extra_dof = 6  # number of extra config appended after the joints configuration, 6 to store linear root velocity and acceleration
        self.mu = 0.5  # friction coefficient between the robot and the environment
        self.used_limbs = [
        ]  # names of the limbs that must be in contact during all the motion
        self.size_foot_x = 0  # size of the feet along the x axis
        self.size_foot_y = 0  # size of the feet along the y axis
        self.q_init = []
        self.q_goal = []

    @abstractmethod
    def load_rbprm(self):
        """
        Build an rbprmBuilder instance for the correct robot and initialize it's extra config size
        """
        pass

    def set_configurations(self):
        self.rbprmBuilder.client.robot.setDimensionExtraConfigSpace(
            self.extra_dof)
        self.q_init = self.rbprmBuilder.getCurrentConfig()
        self.q_goal = self.rbprmBuilder.getCurrentConfig()
        self.q_init[2] = self.rbprmBuilder.ref_height
        self.q_goal[2] = self.rbprmBuilder.ref_height

    def compute_extra_config_bounds(self):
        """
        Compute extra dof bounds from the current values of v_max and a_max
        By default, set symmetrical bounds on x and y axis and bounds z axis values to 0
        """
        # bounds for the extradof : by default use v_max/a_max on x and y axis and 0 on z axis
        self.extra_dof_bounds = [
            -self.v_max, self.v_max, -self.v_max, self.v_max, 0, 0,
            -self.a_max, self.a_max, -self.a_max, self.a_max, 0, 0
        ]

    def set_joints_bounds(self):
        """
        Set the root translation and rotation bounds as well as the the extra dofs bounds
        """
        self.rbprmBuilder.setJointBounds("root_joint",
                                         self.root_translation_bounds)
        self.rbprmBuilder.boundSO3(self.root_rotation_bounds)
        self.rbprmBuilder.client.robot.setExtraConfigSpaceBounds(
            self.extra_dof_bounds)

    def set_rom_filters(self):
        """
        Define which ROM must be in collision at all time and with which kind of affordances
        By default it set all the roms in used_limbs to be in contact with 'support' affordances
        """
        self.rbprmBuilder.setFilter(self.used_limbs)
        for limb in self.used_limbs:
            self.rbprmBuilder.setAffordanceFilter(limb, ['Support'])

    def init_problem(self):
        """
        Load the robot, set the bounds and the ROM filters and then
        Create a ProblemSolver instance and set the default parameters.
        The values of v_max, a_max, mu, size_foot_x and size_foot_y must be defined before calling this method
        """
        self.load_rbprm()
        self.set_configurations()
        self.compute_extra_config_bounds()
        self.set_joints_bounds()
        self.set_rom_filters()
        self.ps = ProblemSolver(self.rbprmBuilder)
        # define parameters used by various methods :
        if self.v_max >= 0:
            self.ps.setParameter("Kinodynamic/velocityBound", self.v_max)
        if self.a_max >= 0:
            self.ps.setParameter("Kinodynamic/accelerationBound", self.a_max)
        if self.size_foot_x > 0:
            self.ps.setParameter("DynamicPlanner/sizeFootX", self.size_foot_x)
        if self.size_foot_y > 0:
            self.ps.setParameter("DynamicPlanner/sizeFootY", self.size_foot_y)
        self.ps.setParameter("DynamicPlanner/friction", 0.5)
        # sample only configuration with null velocity and acceleration :
        self.ps.setParameter("ConfigurationShooter/sampleExtraDOF", False)

    def init_viewer(self,
                    env_name,
                    env_package="hpp_environments",
                    reduce_sizes=[0, 0, 0],
                    visualize_affordances=[]):
        """
        Build an instance of hpp-gepetto-viewer from the current problemSolver
        :param env_name: name of the urdf describing the environment
        :param env_package: name of the package containing this urdf (default to hpp_environments)
        :param reduce_sizes: distances used to shrink the affordance planes toward their center
        (in order to avoid placing contacts close to the edges of the surface)
        :param visualize_affordances: list of affordance types to visualize, defaults to none
        """
        vf = ViewerFactory(self.ps)
        self.afftool = AffordanceTool()
        self.afftool.setAffordanceConfig('Support', [0.5, 0.03, 0.00005])
        self.afftool.loadObstacleModel("package://" + env_package + "/urdf/" +
                                       env_name + ".urdf",
                                       "planning",
                                       vf,
                                       reduceSizes=reduce_sizes)

        self.v = vf.createViewer(ghost=True, displayArrows=True)
        self.pp = PathPlayer(self.v)
        for aff_type in visualize_affordances:
            self.afftool.visualiseAffordances(aff_type, self.v,
                                              self.v.color.lightBrown)

    def init_planner(self, kinodynamic=True, optimize=True):
        """
        Select the rbprm methods, and the kinodynamic ones if required
        :param kinodynamic: if True, also select the kinodynamic methods
        :param optimize: if True, add the RandomShortcut path optimizer (or RandomShortcutDynamic if kinodynamic is also True)
        """
        self.ps.selectConfigurationShooter("RbprmShooter")
        self.ps.selectPathValidation("RbprmPathValidation", 0.05)
        if kinodynamic:
            self.ps.selectSteeringMethod("RBPRMKinodynamic")
            self.ps.selectDistance("Kinodynamic")
            self.ps.selectPathPlanner("DynamicPlanner")
        if optimize:
            if kinodynamic:
                self.ps.addPathOptimizer("RandomShortcutDynamic")
            else:
                self.ps.addPathOptimizer("RandomShortcut")

    def solve(self):
        """
        Solve the path planning problem.
        q_init and q_goal must have been defined before calling this method
        """
        if len(self.q_init) != self.rbprmBuilder.getConfigSize():
            raise ValueError(
                "The initial configuration vector does not have the right size")
        if len(self.q_goal) != self.rbprmBuilder.getConfigSize():
            raise ValueError(
                "The goal configuration vector does not have the right size")
        self.ps.setInitialConfig(self.q_init)
        self.ps.addGoalConfig(self.q_goal)
        self.v(self.q_init)
        t = self.ps.solve()
        print("Guide planning time : ", t)

    def display_path(self, path_id=-1, dt=0.1):
        """
        Display the path in the viewer; if no path is specified, display the last one
        :param path_id: the id of the path to display, defaults to the most recent one
        :param dt: discretization step used to display the path (defaults to 0.1)
        """
        if self.pp is not None:
            if path_id < 0:
                path_id = self.ps.numberPaths() - 1
            self.pp.dt = dt
            self.pp.displayVelocityPath(path_id)

    def play_path(self, path_id=-1, dt=0.01):
        """
        Play the path in the viewer; if no path is specified, play the last one
        :param path_id: the id of the path to play, defaults to the most recent one
        :param dt: discretization step used to play the path (defaults to 0.01)
        """
        self.show_rom()
        if self.pp is not None:
            if path_id < 0:
                path_id = self.ps.numberPaths() - 1
            self.pp.dt = dt
            self.pp(path_id)

    def hide_rom(self):
        """
        Remove the current robot from the display
        """
        self.v.client.gui.setVisibility(self.robot_node_name, "OFF")

    def show_rom(self):
        """
        Add the current robot to the display
        """
        self.v.client.gui.setVisibility(self.robot_node_name, "ON")

    @abstractmethod
    def run(self):
        """
        Must be defined in the child class to run all the methods with the correct arguments.
        """
        # example of definition:
        """
        self.init_problem()
        # define initial and goal position
        self.q_init[:2] = [0, 0]
        self.q_goal[:2] = [1, 0]
        
        self.init_viewer("multicontact/ground", visualize_affordances=["Support"])
        self.init_planner()
        self.solve()
        self.display_path()
        self.play_path()
        """
        pass
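# Illustrative sketch (not part of the original module): a concrete subclass only
# needs to implement load_rbprm() and run(); "MyRobotRbprmBuilder" is a
# hypothetical rbprm builder class for the chosen robot model.
#
# class MyPathPlanner(AbstractPathPlanner):
#     def load_rbprm(self):
#         self.rbprmBuilder = MyRobotRbprmBuilder()
#
#     def run(self):
#         self.init_problem()
#         self.q_init[:2] = [0., 0.]
#         self.q_goal[:2] = [1., 0.]
#         self.init_viewer("multicontact/ground", visualize_affordances=["Support"])
#         self.init_planner()
#         self.solve()
#         self.display_path()
#         self.play_path()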
vf (q_init)

q_goal [0:2] = [-3.2, -4]
rank = robot.rankInConfiguration ['l_shoulder_lift_joint']
q_goal [rank] = 0.5
rank = robot.rankInConfiguration ['l_elbow_flex_joint']
q_goal [rank] = -0.5
rank = robot.rankInConfiguration ['r_shoulder_lift_joint']
q_goal [rank] = 0.5
rank = robot.rankInConfiguration ['r_elbow_flex_joint']
q_goal [rank] = -0.5
vf (q_goal)

vf.loadObstacleModel ("iai_maps", "kitchen_area", "kitchen")

ps.setInitialConfig (q_init)
ps.addGoalConfig (q_goal)

ps.addPathOptimizer ("RandomShortcut")

# print (ps.solve ())

## Uncomment this to connect to a viewer server and play solution paths
# 
# v = vf.createViewer()
# from hpp.gepetto import PathPlayer
# pp = PathPlayer (v)

# pp (0)
# pp (1)
ps.setInitialConfig(q1)
ps.addGoalConfig(q2)
cl.obstacle.loadObstacleModel('potential_description', 'obstacles_concaves',
                              'obstacles_concaves')

#ps.createOrientationConstraint ("orConstraint", "base_joint_rz", "", [1,0,0,0], [0,0,1])
#ps.setNumericalConstraints ("constraints", ["orConstraint"])

ps.selectPathPlanner("VisibilityPrmPlanner")
#ps.selectPathValidation ("Dichotomy", 0.)

ps.solve()
ps.pathLength(0)

ps.addPathOptimizer("GradientBased")
ps.optimizePath(0)
ps.numberPaths()
ps.pathLength(ps.numberPaths() - 1)

import matplotlib.pyplot as plt
from mutable_trajectory_plot import planarPlot, addNodePlot
from parseLog import parseCollConstrPoints
num_log = 31891
contactPoints = parseCollConstrPoints(num_log, '77: contact point = (')
plt = planarPlot(cl, 0, ps.numberPaths() - 1, plt, 1.5, 5)
plt = addNodePlot(contactPoints, 'ko', '', 5.5, plt)
plt.show()

ps.addPathOptimizer('RandomShortcut')
ps.optimizePath(0)
q2hard = [7.60, -2.41, 0.545, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.8, 0.0, -0.4, -0.55, 0.0, -0.6, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -2.8, 0.0, 0.1, -0.2, -0.1, 0.4, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -0.2, 0.6, -0.1, 1.2, -0.4, 0.2, -0.3, 0.0, -0.4, 0.2, 0.7, 0.0]

robot.isConfigValid(q1)
robot.isConfigValid(q2)

# qf should be invalid
qf = [1, -3, 3, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2, 1.0, -0.4, -1.0, 0.0, -0.2, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -1.5, -0.2, 0.1, -0.3, 0.1, 0.1, 0.174532, -0.174532, 0.174532, -0.174532, 0.174532, -0.174532, -0.2, 0.6, -0.453786, 0.872665, -0.418879, 0.2, -0.4, 0.0, -0.453786, 0.1, 0.7, 0.0]
robot.isConfigValid(qf)

ps.setInitialConfig (q1); ps.addGoalConfig (q2); ps.solve ()

ps.solve ()
ps.pathLength(0)

ps.addPathOptimizer('RandomShortcut')
ps.optimizePath (0)
ps.pathLength(1)

ps.clearPathOptimizers()
ps.addPathOptimizer("GradientBased")
ps.optimizePath (0)
ps.numberPaths()
ps.pathLength(ps.numberPaths()-1)

pp(ps.numberPaths()-1)


r(ps.configAtParam(0,2))
ps.getWaypoints (0)
Example #8
import datetime as dt
totalSolveTime = dt.timedelta (0)
totalOptimTime = dt.timedelta (0)
totalNumberNodes = 0
N = 20
for i in range (N):
    ps.clearPathOptimizers()
    ps.clearRoadmap ()
    ps.resetGoalConfigs ()
    ps.setInitialConfig (q_init)
    ps.addGoalConfig (q_goal)

    t1 = dt.datetime.now ()
    ps.solve ()
    t2 = dt.datetime.now ()
    ps.addPathOptimizer ("SplineGradientBased_bezier3")
    ps.optimizePath (ps.numberPaths() - 1)
    t3 = dt.datetime.now ()

    totalSolveTime += t2 - t1
    totalOptimTime += t3 - t2
    print "Solve:", t2-t1
    print "Optim:", t3-t2
    n = len (ps.client.problem.nodes ())
    totalNumberNodes += n
    print ("Number nodes: " + str(n))

print ("Average solve time: " + str ((totalSolveTime.seconds+1e-6*totalSolveTime.microseconds)/float (N)))
print ("Average optim time: " + str ((totalOptimTime.seconds+1e-6*totalOptimTime.microseconds)/float (N)))
print ("Average number nodes: " + str (totalNumberNodes/float (N)))
#ps.readRoadmap ('/local/mcampana/devel/hpp/data/puzzle_easy_RRT.rdm')
#ps.readRoadmap ('/local/mcampana/devel/hpp/data/puzzle_easy_PRM1.rdm') # srand # problem ?
#ps.readRoadmap ('/local/mcampana/devel/hpp/data/puzzle_easy_PRM1.rdm') # srand 1453110445(909sec) [COLL!]
#ps.readRoadmap ('/local/mcampana/devel/hpp/data/puzzle_easy_PRM2.rdm') # srand  # just after solve, GB OK. But after readroadmap+solve, segfault quaternions....
ps.readRoadmap ('/local/mcampana/devel/hpp/data/puzzle_easy_PRM_test1.rdm') #srand 1454520599 working0.05
# srand 1454521537 (no RM saved) works 7 -> 5, best 0.2
ps.solve ()
ps.pathLength(0)
len(ps.getWaypoints (0))



r(q1)

import numpy as np
"""
ps.addPathOptimizer("Prune")
ps.optimizePath (0)
ps.numberPaths()
ps.pathLength(ps.numberPaths()-1)
len(ps.getWaypoints (ps.numberPaths()-1))
"""
ps.clearPathOptimizers()
cl.problem.setAlphaInit (0.05)
ps.addPathOptimizer("GradientBased")
ps.optimizePath (0)
ps.numberPaths()
ps.pathLength(ps.numberPaths()-1)

tGB = cl.problem.getTimeGB ()
timeValuesGB = cl.problem.getTimeValues ()
Example #10
r = Viewer(ps)

q_init = robot.getCurrentConfig()
q_goal = q_init[::]
q1 = q_init[::]
q_init[0] = -.5
q_goal[0] = .5
r(q_init)
r(q_goal)
q1[:2] = (0., .5)
r.loadObstacleModel("hpp_tutorial", "box", "box-1")

ps.selectPathValidation("Dichotomy", 0.)
ps.setInitialConfig(q_init)
ps.addGoalConfig(q1)
ps.solve()
ps.resetGoalConfigs()
ps.addGoalConfig(q_goal)
ps.solve()

ps.addPathOptimizer("GradientBased")
#ps.optimizePath (ps.numberPaths () - 1)

from hpp.gepetto import PathPlayer

pp = PathPlayer(robot.client, r)

#pp (0)
#pp (1)
Example #11
robot = Robot('lydia')
robot.setJointBounds('base_joint_xyz', [-0.9, 0.9, -0.9, 0.9, -1.1, 1.1])
ps = ProblemSolver(robot)
r = Viewer(ps)

r.loadObstacleModel("hpp_benchmark", "obstacle", "obstacle")

q_init = robot.getCurrentConfig()
q_goal = q_init[::]

q_init[2] = -0.6
q_goal[2] = 0.6

ps.selectPathPlanner("VisibilityPrmPlanner")
ps.selectPathValidation("Dichotomy", 0.)

ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)

ps.readRoadmap("/local/mcampana/devel/hpp/src/hpp_benchmark/roadmap/lydia.rdm")
#ps.solve ()

pp = PathPlayer(robot.client, r)
"""
ps.addPathOptimizer ("GradientBased")
ps.optimizePath (0)
ps.numberPaths()
ps.pathLength(ps.numberPaths()-1)
"""
Example #12

#ps.readRoadmap ('/local/mcampana/devel/hpp/data/ur5-sphere-PRM.rdm')
#ps.readRoadmap ('/local/mcampana/devel/hpp/data/ur5-sphere-RRT.rdm')

ps.selectPathPlanner ("VisibilityPrmPlanner")
#ps.selectPathValidation ("Dichotomy", 0.)

ps.solve ()
ps.pathLength(0)
len(ps.getWaypoints (0))

#ps.saveRoadmap ('/local/mcampana/devel/hpp/data/ur5-sphere-PRM.rdm')


ps.addPathOptimizer("Prune") # NO CHANGE WITH PRM+DISCR
ps.optimizePath (0)
ps.numberPaths()
ps.pathLength(ps.numberPaths()-1)
len(ps.getWaypoints (ps.numberPaths()-1))

ps.clearPathOptimizers()
cl.problem.setAlphaInit (0.3)
ps.addPathOptimizer("GradientBased")
ps.optimizePath (0)
ps.numberPaths()
ps.pathLength(ps.numberPaths()-1)
tGB = cl.problem.getTimeGB ()
timeValuesGB = cl.problem.getTimeValues ()
gainValuesGB = cl.problem.getGainValues ()
newGainValuesGB = ((1 - np.array(gainValuesGB)) * 100).tolist()  # percentage of the initial path length
Example #13
# Imports assumed for this snippet; Ghost and HyQ come from the original project.
import copy
from math import atan2, cos, sin
from threading import Timer

from hpp.corbaserver import Client, ProblemSolver


class Agent(Client):
    robot = None
    platform = None
    index = 0
    ps = None
    # we load other agents as ghosts to reduce the computation time while planning
    ghosts = []
    ghost_urdf = ''
    ghost_package = ''
    # to avoid confusion, we use start and end instead of init and goal
    start_config = []
    end_config = []
    current_config = []  # this is not used for now
    permitted_plan = []  # this is the plan generated
    repeat = 0  # to help the selectProblem function

    # an agent should have a robot and the start and end configuration
    # to avoid confusion, we use start_config instead of init_config and
    # end_config instead of goal_config
    def __init__(self, robot, start, end):
        Client.__init__(self)
        self.repeat = 0
        # print 'creating an agent of type ', robotType
        self.robot = robot
        self.start_config = start
        self.end_config = end
        self.current_config = self.start_config
        self.__plan_proposed = []

    # once all agents are generated, we may register the agents to a platform
    def registerPlatform(self, platform, index):
        self.platform = platform
        self.index = index

    # this function gives some information about the agent and robot it is managing
    def printInformation(self):
        print('-------------------------------------------')
        print('name of the robot:\t', self.robot.name)
        print('configuration size:\t', self.robot.getConfigSize())
        print('degree of freedom:\t', self.robot.getNumberDof())
        print('mass of the robot:\t', self.robot.getMass())
        print('the center of mass:\t', self.robot.getCenterOfMass())
        config = self.robot.getCurrentConfig()
        nm = self.robot.getJointNames()
        print('there are', len(nm), 'joint names in total. They are:')
        for i in range(len(nm)):
            lower = self.robot.getJointBounds(nm[i])[0]
            upper = self.robot.getJointBounds(nm[i])[1]
            print('joint name:', nm[i],
                  '\trank in configuration:', self.robot.rankInConfiguration[nm[i]],
                  '\tlower bound: {0:.3f}'.format(lower),
                  '\tupper bound: {0:.3f}'.format(upper))

    # set up the environment
    def setEnvironment(self):
        if self.platform.env is not None:
            self.ps.loadObstacleFromUrdf(self.platform.env.packageName,
                                         self.platform.env.urdfName,
                                         self.platform.env.name)
            # self.ps.moveObstacle('airbase_link_0', [0,0, -3, 1,0,0,0])

    # load the other agents to the problem solver
    def loadOtherAgents(self):
        # print 'There are ', len(self.platform.agents), 'agents'
        #load ghost agents
        for a in self.platform.agents:
            if (a.index != self.index):
                # if it is not itself then load a ghost agent
                g = Ghost()
                self.ps.loadObstacleFromUrdf(
                    g.packageName, g.urdfName,
                    a.robot.name)  # it's the robot's name!!!
                # and then place it at the initial location of the agent
                # print self.robot.name, ' is now loading ', a.robot.name, ' as a ghost'
                config = a.current_config
                spec = self.getMoveSpecification(config)
                spec[2] = 0.3
                self.obstacle.moveObstacle(a.robot.name + 'base_link_0', spec)

    # load agents from the node
    def loadOtherAgentsFromNode(self, node):
        print('There are', len(self.platform.agents), 'agents')
        #load ghost agents
        for a in self.platform.agents:
            if (a.index != self.index):
                # if it is not itself then load a ghost agent
                g = Ghost()
                self.ps.loadObstacleFromUrdf(
                    g.packageName, g.urdfName,
                    a.robot.name)  # it's the robot's name!!!
                # and then place it at the initial location of the agent
                config = node.getAgentCurrentConfig(a.index)
                spec = self.getMoveSpecification(config)
                self.obstacle.moveObstacle(a.robot.name + 'base_link_0', spec)
                print(self.robot.name, 'is now loading', a.robot.name,
                      'as a ghost; it is at', spec[0], spec[1])

    # note that the default solver does not consider the position of other agents
    def startDefaultSolver(self):
        self.repeat += 1
        name = self.robot.name
        self.problem.selectProblem(str(self.index) + ' ' + str(self.repeat))
        self.robot = HyQ(name)
        self.ps = ProblemSolver(self.robot)
        self.ps.setInitialConfig(self.start_config)
        self.ps.addGoalConfig(self.end_config)
        self.ps.selectPathPlanner("VisibilityPrmPlanner")
        self.ps.addPathOptimizer("RandomShortcut")

    # initialise a solver from a node, the node contains information about other agents and itself
    # this method is used when proposing plans while interacting with platform for MAS path planning
    def startNodeSolver(self, node):
        self.repeat += 1
        name = self.robot.name
        self.problem.selectProblem(str(self.index) + ' ' + str(self.repeat))
        self.robot = HyQ(name)
        self.ps = ProblemSolver(self.robot)
        cfg = node.getAgentCurrentConfig(self.index)
        print('this iteration, the agent', name, 'starts from', cfg[0], cfg[1])
        self.ps.setInitialConfig(cfg)
        self.ps.addGoalConfig(self.end_config)
        self.ps.selectPathPlanner("VisibilityPrmPlanner")
        self.ps.addPathOptimizer("RandomShortcut")

    # this is only used when the agent takes too long (30 seconds) while planning
    def terminate_solving(self):
        self.problem.interruptPathPlanning()

    # the solve method for problem solver but with a time bound
    def solve(self):
        # try catch -------------------
        try:
            t = Timer(30.0, self.terminate_solving)
            t.start()
            print('solved:', self.ps.solve())
            t.cancel()
        except Error as e:
            print(e.msg)
            print('***************\nfailed to plan within limited time\n**************')
            return -1

        # self.repeat += 1

    # store the path for two reasons:
    # 1. store as a default plan
    # 2. to continue the path
    def storePath(self, choice=0, segments=8):
        # always store the first one for now
        self.__plan_proposed = []
        for p in range(int(round(segments * self.ps.pathLength(choice)))):
            self.__plan_proposed.append(
                self.ps.configAtParam(choice, p * 1.0 / segments))

        # the last configuration is the goal configuration
        if self.ps.configAtParam(
                choice, self.ps.pathLength(choice)) == self.end_config:
            self.__plan_proposed.append(self.end_config)
        print('stored; plan length:', len(self.__plan_proposed))

    # this is hard-coded for now for the airplane example; we should introduce an entry
    # in the environment class for it.
    def setBounds(self):
        self.robot.setJointBounds("base_joint_xy", [-35, 10, -2.6, 4.3])

    # the rest are just helper functions
    def getConfigOfProposedPlanAtTime(self, index):
        return self.__plan_proposed[index]

    def getConfigOfPermittedPlanAtTime(self, index):
        return self.permitted_plan[index]

    def getProposedPlanLength(self):
        return len(self.__plan_proposed)

    def setPermittedPlan(self, plan):
        self.permitted_plan = plan

    def getPermittedPlanLength(self):
        return len(self.permitted_plan)

    # export the permitted plan to a specific file in the format
    # 	agent X
    # 	config 1
    # 	config 2
    # 	etc
    def exportPermittedPlan(self, filename):
        f = open(filename, 'a+')
        f.write('agent ' + str(self.index) + '\n')
        for p in self.permitted_plan:
            f.write(str(p)[1:-1] + '\n')
        f.close()

    # for the sake of manipulation, we return a copy of it
    def obtainPermittedPlan(self):
        return copy.copy(self.permitted_plan)

    # we return only a copy, not the original one;
    # to remind of the difference, we use 'obtain' instead of 'get'
    def obtainProposedPlan(self):
        # return a copy so the stored plan cannot be modified by the caller
        return copy.copy(self.__plan_proposed)

    # to transfer the specification from 2D to 3D
    def getMoveSpecification(self, config):
        x = config[0]
        y = config[1]
        th = atan2(config[3], config[2])
        # print 'sin = ', self.init_config[3], ' cos = ', self.init_config[2], ' th = ', th
        return [x, y, 0, cos(th / 2), 0, 0, sin(th / 2)]

    # the function to compute a plan, exceptions are not handled in this simple demo
    def computePlan(self, node):
        self.startNodeSolver(node)
        self.setBounds()
        self.setEnvironment()
        self.loadOtherAgentsFromNode(node)
        if self.solve() != -1:
            self.storePath()
        else:
            # keep the remaining part of the previous plan, starting from the agent's current configuration
            self.__plan_proposed = [node.getAgentCurrentConfig(self.index)] + \
                self.__plan_proposed[node.progress_time:]
            print('take the previous plan and continue the search')
            return -1
Example #14
robot = Robot ('robot_2d')
ps = ProblemSolver (robot)
cl = robot.client
cl.obstacle.loadObstacleModel('robot_2d_description','cylinder_obstacle','')

# q = [x, y] # limits in URDF file
q1 = [-2, 0]; q2 = [-0.2, 2]; q3 = [0.2, 2]; q4 = [2, 0]
ps.setInitialConfig (q1); ps.addGoalConfig (q2); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q2); ps.addGoalConfig (q3); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q3); ps.addGoalConfig (q4); ps.solve (); ps.resetGoalConfigs ()
ps.setInitialConfig (q1); ps.addGoalConfig (q4); ps.solve (); ps.resetGoalConfigs ()
# pp(3) = p0 final

#ps.addPathOptimizer("GradientBased")
#ps.addPathOptimizer("Prune")
ps.addPathOptimizer("PartialRandomShortcut")
ps.optimizePath(3) # pp(4) = p1 final

ps.pathLength(3)
ps.pathLength(4)
ps.getWaypoints (3)
ps.getWaypoints (4)
# should be [-0.07 0] [0.07 0] if alpha_init=1


"""
q1 = [-2, 0]; q2 = [-1, 1]
ps.setInitialConfig (q1); ps.addGoalConfig (q2); ps.solve ()
ps.resetGoalConfigs ()
q1 = [-1, 1]; q2 = [-1.2, 1.8]
ps.setInitialConfig (q1); ps.addGoalConfig (q2); ps.solve ()