# Affordance analysis + initial/goal configurations for an RB-PRM planning
# problem on the "downSlope" scene.
# (Reconstructed: the original newlines had been collapsed to spaces, which
# made the chunk syntactically invalid Python.)
# Relies on names defined elsewhere in the file: AffordanceTool, packageName,
# r (the Viewer), rbprmBuilder, ps (the ProblemSolver).
afftool = AffordanceTool()
# [error margin, angle margin, min area] for 'Support' affordance detection
# -- TODO confirm parameter meaning against hpp-affordance docs.
afftool.setAffordanceConfig('Support', [0.5, 0.03, 0.00005])
afftool.loadObstacleModel(packageName, "downSlope", "planning", r)
#r.loadObstacleModel (packageName, "ground", "planning")
afftool.visualiseAffordances('Support', r, [0.25, 0.5, 0.5])
r.addLandmark(r.sceneName, 1)

# Setting initial and goal configurations
q_init = rbprmBuilder.getCurrentConfig()
q_init[3:7] = [0.9659, 0, 0.2588, 0]  # quaternion: ~30 deg rotation
q_init[0:3] = [-1.25, 1, 1.7]
r(q_init)
#q_init[3:7] = [0.7071,0,0,0.7071]
#q_init [0:3] = [1, 1, 0.65]
rbprmBuilder.setCurrentConfig(q_init)
q_goal = q_init[::]  # shallow copy of the whole configuration vector
#q_goal[3:7] = [0.7071,0,0,0.7071]
#q_goal [0:3] = [1, 5, 0.65]; r(q_goal)
q_goal[3:7] = [1, 0, 0, 0]  # identity orientation
q_goal[0:3] = [2, 1, 0.60]
r(q_goal)
#q_goal[3:7] = [0.9659,0,0.2588,0]
#q_goal[7:10] = [vMax,0,-2]
#q_goal [0:3] = [0, 1, 0.8]; r(q_goal)
# NOTE(review): redundant second display of the same goal configuration;
# kept to preserve the original behavior exactly.
r(q_goal)
#~ q_goal [0:3] = [-1.5, 0, 0.63]; r (q_goal)

# Choosing a path optimizer
ps.setInitialConfig(q_init)
# Stand-alone RB-PRM run for the "tested" robot model: build the model,
# solve a short planning query, then set up the bounds for a reachability
# sweep (presumably consumed by a grid loop later in the file).
# (Reconstructed: original newlines had been collapsed to spaces.)
# Relies on names defined elsewhere: Builder, ProblemSolver, Viewer,
# urdfNameTested, urdfNameRoms, rootJointType, meshPackageName, packageName,
# urdfSuffix, srdfSuffix, scene.
tested = Builder()
tested.loadModel(urdfNameTested, urdfNameRoms, rootJointType,
                 meshPackageName, packageName, urdfSuffix, srdfSuffix)
#~ tested.setNormalFilter('hrp2_lleg_rom', [0,0,1], 0.9)
#~ tested.setFilter(['hrp2_lleg_rom','hrp2_rleg_rom'])
ps = ProblemSolver(tested)
r = Viewer(ps)
r.loadObstacleModel(packageName, scene, "planning")
tested.setJointBounds("base_joint_xyz", [-10., 10, -10, 10, 0, 20])
# (The odd capitalisation is the actual spelling of the hpp IDL method.)
ps.client.problem.selectConFigurationShooter("RbprmShooter")
q_init = tested.getCurrentConfig()
q_init[0:3] = [-10, -0.82, 1.25]
tested.setCurrentConfig(q_init)
r(q_init)
q_goal = q_init[::]
q_goal[0:3] = [-9, -0.65, 1.25]
r(q_goal)
ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)
t = ps.solve()  # resolution time; exact format depends on the hpp version
# Sweep bounds for the follow-up analysis.
# NOTE(review): the original assigned `res = {}` twice; the redundant
# duplicate was dropped.
res = {}
x_start = -1.5
y_start = 0
x_max = 2.84
y_max = 2.65
iter_step = 0.01
# HyQ RB-PRM planning setup: leg ROM filters, torso rotation bounds, solver
# and viewer construction, then one planning query with timing.
# (Reconstructed: original newlines had been collapsed to spaces.)
# Relies on names defined elsewhere: rbprmBuilder, Viewer, packageName,
# name_of_scene.
rbprmBuilder.setFilter(['hyq_rhleg_rom', 'hyq_lfleg_rom',
                        'hyq_rfleg_rom', 'hyq_lhleg_rom'])
# Each leg ROM must touch a surface whose normal is close to +z (cos >= 0.9).
rbprmBuilder.setNormalFilter('hyq_lhleg_rom', [0, 0, 1], 0.9)
rbprmBuilder.setNormalFilter('hyq_rfleg_rom', [0, 0, 1], 0.9)
rbprmBuilder.setNormalFilter('hyq_lfleg_rom', [0, 0, 1], 0.9)
rbprmBuilder.setNormalFilter('hyq_rhleg_rom', [0, 0, 1], 0.9)
rbprmBuilder.boundSO3([-0.1, 0.1, -1, 1, -1, 1])
#~ from hpp.corbaserver.rbprm. import ProblemSolver
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver(rbprmBuilder)
r = Viewer(ps)
# NOTE(review): dead store -- the fetched config is immediately replaced by
# the literal on the next line; kept to preserve original behavior.
q_init = rbprmBuilder.getCurrentConfig()
q_init = [-6, -3, 0.8, 1, 0, 0, 0]
rbprmBuilder.setCurrentConfig(q_init)
r(q_init)
q_goal = [4, 4, 0.8, 1, 0, 0, 0]
r(q_goal)
ps.addPathOptimizer("RandomShortcut")
ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)
ps.client.problem.selectConFigurationShooter("RbprmShooter")
ps.client.problem.selectPathValidation("RbprmPathValidation", 0.01)
r.loadObstacleModel(packageName, name_of_scene, "planning")
r.addLandmark(r.sceneName, 1)
#~ ps.solve ()
t = ps.solve()
# Older hpp versions return [h, m, s, ms]; fold that into milliseconds.
if isinstance(t, list):
    t = t[0] * 3600000 + t[1] * 60000 + t[2] * 1000 + t[3]
# HRP2 RB-PRM setup: rotation bounds, solver/viewer, then initial and goal
# configurations (goal tilted by a fixed pitch quaternion).
# (Reconstructed: original newlines had been collapsed to spaces.)
# Relies on names defined elsewhere: rbprmBuilder, Viewer.
#~ rbprmBuilder.setNormalFilter('hrp2_rarm_rom', [0,0,1], 0.5)
#~ rbprmBuilder.setNormalFilter('hrp2_lleg_rom', [0,0,1], 0.9)
#~ rbprmBuilder.setNormalFilter('hrp2_rleg_rom', [0,0,1], 0.9)
#~ rbprmBuilder.setNormalFilter('hyq_rhleg_rom', [0,0,1], 0.9)
rbprmBuilder.boundSO3([-0., 0, -1, 1, -1, 1])
#~ from hpp.corbaserver.rbprm. import ProblemSolver
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver(rbprmBuilder)
r = Viewer(ps)
q_init = rbprmBuilder.getCurrentConfig()
q_init[0:3] = [0, -0.82, 0.648702]
rbprmBuilder.setCurrentConfig(q_init)
r(q_init)
# Second candidate start, displayed as well (kept from the original).
q_init[0:3] = [0.1, -0.82, 0.648702]
rbprmBuilder.setCurrentConfig(q_init)
r(q_init)
#~ q_init [0:3] = [0, -0.63, 0.6]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
#~ q_init [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
q_goal = q_init[::]
q_goal[3:7] = [0.98877108, 0., 0.14943813, 0.]  # fixed pitch quaternion
#~ q_goal [0:3] = [1.49, -0.65, 1.25]; r (q_goal)
q_goal[0:3] = [2.1, -0.82, 1.0]
r(q_goal)
# Final goal position, overriding the one displayed just above.
q_goal[0:3] = [3.1, -0.82, 0.55]
r(q_goal)
#~ ps.addPathOptimizer("GradientBased")
ps.addPathOptimizer("RandomShortcut")
ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)
# HyQ planning on the "darpa" scene using affordance filters, followed by a
# server-side affordance analysis of the environment.
# (Reconstructed: original newlines had been collapsed to spaces.)
# Relies on names defined elsewhere: rbprmBuilder, Viewer, packageName.
rbprmBuilder.setAffordanceFilter('hyq_lhleg_rom', ['Support', ])
rbprmBuilder.setAffordanceFilter('hyq_rfleg_rom', ['Support', ])
rbprmBuilder.setAffordanceFilter('hyq_lfleg_rom', ['Support', ])
rbprmBuilder.setAffordanceFilter('hyq_rhleg_rom', ['Support', 'Lean'])
# We also bound the rotations of the torso.
rbprmBuilder.boundSO3([-0.4, 0.4, -3, 3, -3, 3])
# Creating an instance of HPP problem solver and the viewer
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver(rbprmBuilder)
r = Viewer(ps)
r.loadObstacleModel(packageName, "darpa", "planning")
# Setting initial and goal configurations
q_init = rbprmBuilder.getCurrentConfig()
q_init[0:3] = [-2, 0, 0.63]
rbprmBuilder.setCurrentConfig(q_init)
r(q_init)
q_goal = q_init[::]
q_goal[0:3] = [3, 0, 0.63]
r(q_goal)
# Choosing a path optimizer
ps.addPathOptimizer("RandomShortcut")
ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)
# Run the affordance analysis on the loaded scene and fetch the 'Support'
# surfaces (objs is presumably consumed later in the file).
from hpp.corbaserver.affordance import Client
c = Client()
c.affordance.analyseAll()
objs = c.affordance.getAffordancePoints("Support")
import random
# Solver/viewer construction and init/goal configuration setup; the root
# orientation is set through the robot client before re-reading the config.
# (Reconstructed: original newlines had been collapsed to spaces.)
# Relies on names defined elsewhere: rbprmBuilder, Viewer.
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver(rbprmBuilder)
r = Viewer(ps)
q0 = rbprmBuilder.getCurrentConfig()  # kept: may be used past this chunk
q_init = rbprmBuilder.getCurrentConfig()
r(q_init)
q_goal = q_init[::]
q_goal[0:3] = [0.19, 0.05, 0.9]
r(q_goal)
#~ fullBody.client.basic.robot.setJointConfig('base_joint_SO3',[0.7316888688738209, 0, -0.6816387600233341, 0]); q_init = rbprmBuilder.getCurrentConfig (); r (q_init)
# Set the base orientation server-side, then re-read the full configuration.
rbprmBuilder.client.basic.robot.setJointConfig(
    'base_joint_SO3', [0.7316888688738209, 0, 0.6816387600233341, 0])
q_init = rbprmBuilder.getCurrentConfig()
r(q_init)
q_init = rbprmBuilder.getCurrentConfig()
q_init[0:3] = [-1, 0.05, 0.4]
rbprmBuilder.setCurrentConfig(q_init)
r(q_init)
#~ q_0 [0:3] = [-0.2, 0, 0.3]; r (q_0)
q_goal[0:3] = [0.13, 0.05, 0.8]
r(q_goal)
#~ q_init [0:6] = [0.0, -2.2, 2.0, 0.7316888688738209, 0.0, 0.6816387600233341];
#~ rbprmBuilder.setCurrentConfig (q_init); r (q_init)
#~ ps.addPathOptimizer("GradientBased")
ps.addPathOptimizer("RandomShortcut")
ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)
from hpp.corbaserver.affordance.affordance import AffordanceTool
afftool = AffordanceTool()
# HRP2 RB-PRM setup (variant of the chunk above): same bounds and filters,
# different goal position, and the RbprmShooter is selected at the end.
# (Reconstructed: original newlines had been collapsed to spaces.)
# Relies on names defined elsewhere: rbprmBuilder, Viewer.
#~ rbprmBuilder.setNormalFilter('hrp2_rarm_rom', [0,0,1], 0.5)
#~ rbprmBuilder.setNormalFilter('hrp2_lleg_rom', [0,0,1], 0.9)
#~ rbprmBuilder.setNormalFilter('hrp2_rleg_rom', [0,0,1], 0.9)
#~ rbprmBuilder.setNormalFilter('hyq_rhleg_rom', [0,0,1], 0.9)
rbprmBuilder.boundSO3([-0., 0, -1, 1, -1, 1])
#~ from hpp.corbaserver.rbprm. import ProblemSolver
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver(rbprmBuilder)
r = Viewer(ps)
q_init = rbprmBuilder.getCurrentConfig()
q_init[0:3] = [0, -0.82, 0.648702]
rbprmBuilder.setCurrentConfig(q_init)
r(q_init)
# Second candidate start, displayed as well (kept from the original).
q_init[0:3] = [0.1, -0.82, 0.648702]
rbprmBuilder.setCurrentConfig(q_init)
r(q_init)
#~ q_init [0:3] = [0, -0.63, 0.6]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
#~ q_init [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
q_goal = q_init[::]
q_goal[3:7] = [0.98877108, 0., 0.14943813, 0.]  # fixed pitch quaternion
q_goal[0:3] = [1.49, -0.65, 1.25]
r(q_goal)
#~ q_goal [0:3] = [1.2, -0.65, 1.1]; r (q_goal)
#~ ps.addPathOptimizer("GradientBased")
ps.addPathOptimizer("RandomShortcut")
ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)
# (The odd capitalisation is the actual spelling of the hpp IDL method.)
ps.client.problem.selectConFigurationShooter("RbprmShooter")
# HRP2 RB-PRM setup (another variant): low start, near-ground goal; several
# alternative goals are kept as disabled comments.
# (Reconstructed: original newlines had been collapsed to spaces. The
# trailing `r (q_goal)` was folded into the final `#~` comment line to match
# the identical full-line comments elsewhere in this file -- TODO confirm.)
# Relies on names defined elsewhere: rbprmBuilder, Viewer.
#~ rbprmBuilder.setNormalFilter('hrp2_rarm_rom', [0,0,1], 0.5)
#~ rbprmBuilder.setNormalFilter('hrp2_lleg_rom', [0,0,1], 0.9)
#~ rbprmBuilder.setNormalFilter('hrp2_rleg_rom', [0,0,1], 0.9)
#~ rbprmBuilder.setNormalFilter('hyq_rhleg_rom', [0,0,1], 0.9)
rbprmBuilder.boundSO3([-0., 0, -1, 1, -1, 1])
#~ from hpp.corbaserver.rbprm. import ProblemSolver
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver(rbprmBuilder)
r = Viewer(ps)
q_init = rbprmBuilder.getCurrentConfig()
q_init[0:3] = [-0., 0, 0.58]
rbprmBuilder.setCurrentConfig(q_init)
r(q_init)
#~ q_init [0:3] = [0.2, 0, 0.48]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
#~ q_init [0:3] = [0, -0.63, 0.6]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
#~ q_init [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
q_goal = q_init[::]
#~ q_goal [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
#~ [0.8, -0.82, -0.32, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
# NOTE(review): dead store -- overwritten by the next assignment; kept to
# preserve the original statements exactly.
q_goal[0:3] = [0.8, 0, -0.07, ]
q_goal[0:3] = [0.8, 0, 0.27, ]
#~ q_goal [0:3] = [2.0, 0, 0.58,];
#~ [2.0, 0, 0.58, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
#~ q_goal [0:3] = [0.8, 0, 0.17,];
#~ q_goal [0:3] = [1.2, -0.65, 1.1]; r (q_goal)
# Stand-alone RB-PRM run for the "tested" robot model, identical in shape to
# the earlier `tested` chunk but with a different sweep window.
# (Reconstructed: original newlines had been collapsed to spaces.)
# Relies on names defined elsewhere: Builder, ProblemSolver, Viewer,
# urdfNameTested, urdfNameRoms, rootJointType, meshPackageName, packageName,
# urdfSuffix, srdfSuffix, scene.
tested = Builder()
tested.loadModel(urdfNameTested, urdfNameRoms, rootJointType,
                 meshPackageName, packageName, urdfSuffix, srdfSuffix)
#~ tested.setNormalFilter('hrp2_lleg_rom', [0,0,1], 0.9)
#~ tested.setFilter(['hrp2_lleg_rom','hrp2_rleg_rom'])
ps = ProblemSolver(tested)
r = Viewer(ps)
r.loadObstacleModel(packageName, scene, "planning")
tested.setJointBounds("base_joint_xyz", [-10., 10, -10, 10, 0, 20])
# (The odd capitalisation is the actual spelling of the hpp IDL method.)
ps.client.problem.selectConFigurationShooter("RbprmShooter")
q_init = tested.getCurrentConfig()
q_init[0:3] = [-10, -0.82, 1.25]
tested.setCurrentConfig(q_init)
r(q_init)
q_goal = q_init[::]
q_goal[0:3] = [-9, -0.65, 1.25]
r(q_goal)
ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)
t = ps.solve()  # resolution time; exact format depends on the hpp version
# Sweep bounds for the follow-up analysis.
# NOTE(review): the original assigned `res = {}` twice; the redundant
# duplicate was dropped.
res = {}
x_start = 0
y_start = 0
x_max = 2
y_max = 1.64
iter_step = 0.01
# Planning query on the "car" scene with timing normalisation and a log file
# opened for appending results.
# (Reconstructed: original newlines had been collapsed to spaces.)
# Relies on names defined elsewhere: rbprmBuilder, Viewer, packageName.
#~ from hpp.corbaserver.rbprm. import ProblemSolver
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver(rbprmBuilder)
r = Viewer(ps)
q0 = rbprmBuilder.getCurrentConfig()  # kept: may be used past this chunk
q_init = rbprmBuilder.getCurrentConfig()
r(q_init)
q_goal = q_init[::]
q_init = rbprmBuilder.getCurrentConfig()
q_init[0:3] = [0.15, -0.45, 0.8]
r(q_init)
rbprmBuilder.setCurrentConfig(q_init)
r(q_init)
#~ q_goal[0:3] = [1.2,-1,0.5]; r(q_goal)
q_goal[0:3] = [0.2, -1.1, 0.58]
r(q_goal)
ps.addPathOptimizer("RandomShortcut")
ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)
# (The odd capitalisation is the actual spelling of the hpp IDL method.)
ps.client.problem.selectConFigurationShooter("RbprmShooter")
ps.client.problem.selectPathValidation("RbprmPathValidation", 0.01)
r.loadObstacleModel(packageName, "car", "planning")
t = ps.solve()
print(t)
# Older hpp versions return [h, m, s, ms]; fold that into milliseconds.
if isinstance(t, list):
    t = t[0] * 3600000 + t[1] * 60000 + t[2] * 1000 + t[3]
# NOTE(review): no `with` block because `f` is presumably written/closed
# past the end of this chunk.
f = open('log.txt', 'a')
# HRP2 RB-PRM setup: start at a raised platform position, goal back on the
# ground (mirror of the earlier variant's positions).
# (Reconstructed: original newlines had been collapsed to spaces.)
# Relies on names defined elsewhere: rbprmBuilder, Viewer.
#~ rbprmBuilder.setNormalFilter('hrp2_rarm_rom', [0,0,1], 0.5)
#~ rbprmBuilder.setNormalFilter('hrp2_lleg_rom', [0,0,1], 0.9)
#~ rbprmBuilder.setNormalFilter('hrp2_rleg_rom', [0,0,1], 0.9)
#~ rbprmBuilder.setNormalFilter('hyq_rhleg_rom', [0,0,1], 0.9)
rbprmBuilder.boundSO3([-0., 0, -1, 1, -1, 1])
#~ from hpp.corbaserver.rbprm. import ProblemSolver
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver(rbprmBuilder)
r = Viewer(ps)
q_init = rbprmBuilder.getCurrentConfig()
q_init[0:3] = [1.49, -0.65, 1.25]
rbprmBuilder.setCurrentConfig(q_init)
r(q_init)
#~ q_init [0:3] = [0, 0, 0.648702]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
# Orientation set after the display above; only the local list is updated.
q_init[3:7] = [0, 0, 0, 1]
#~ q_init [0:3] = [0, -0.63, 0.6]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
#~ q_init [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
q_goal = q_init[::]
#~ q_goal [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
q_goal[0:3] = [0, 0, 0.648702]
r(q_goal)
#~ q_goal [0:3] = [1.2, -0.65, 1.1]; r (q_goal)
#~ ps.addPathOptimizer("GradientBased")
ps.addPathOptimizer("RandomShortcut")
ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)
# "Spiderman" robot RB-PRM setup: all four contact spheres use the hand
# affordance filter; vertical climb from z=1 to z=4.4.
# (Reconstructed: original newlines had been collapsed to spaces.)
# Relies on names defined elsewhere: rbprmBuilder, Viewer, affordanceTypeHand.
rbprmBuilder.setAffordanceFilter('SpidermanLFootSphere', affordanceTypeHand)
rbprmBuilder.setAffordanceFilter('SpidermanRFootSphere', affordanceTypeHand)
rbprmBuilder.setAffordanceFilter('SpidermanLHandSphere', affordanceTypeHand)
rbprmBuilder.setAffordanceFilter('SpidermanRHandSphere', affordanceTypeHand)
#~ from hpp.corbaserver.rbprm. import ProblemSolver
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver(rbprmBuilder)
r = Viewer(ps)
q_init = rbprmBuilder.getCurrentConfig()
#~ q_init [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
q_init[0:3] = [-0.05, -0.82, 1]
rbprmBuilder.setCurrentConfig(q_init)
r(q_init)
#~ q_init [0:3] = [0.1, -0.82, 0.648702]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
#~ q_init [0:3] = [0, -0.63, 0.6]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
#~ q_init [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
q_goal = q_init[::]
#~ q_goal [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
q_goal[0:3] = [-0.05, -0.82, 4.4]
rbprmBuilder.setCurrentConfig(q_goal)
r(q_goal)
#~ q_goal [0:3] = [3, -0.82, 6]; r(q_goal)
#~ q_goal [0:3] = [1.2, -0.65, 1.1]; r (q_goal)
#~ ps.addPathOptimizer("GradientBased")
ps.addPathOptimizer("RandomShortcut")
ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)
# Stand-alone RB-PRM run for the "tested" robot model (third copy of this
# pattern in the file; same sweep window as the previous one, and without
# the duplicated `res = {}`).
# (Reconstructed: original newlines had been collapsed to spaces.)
# Relies on names defined elsewhere: Builder, ProblemSolver, Viewer,
# urdfNameTested, urdfNameRoms, rootJointType, meshPackageName, packageName,
# urdfSuffix, srdfSuffix, scene.
tested = Builder()
tested.loadModel(urdfNameTested, urdfNameRoms, rootJointType,
                 meshPackageName, packageName, urdfSuffix, srdfSuffix)
#~ tested.setNormalFilter('hrp2_lleg_rom', [0,0,1], 0.9)
#~ tested.setFilter(['hrp2_lleg_rom','hrp2_rleg_rom'])
ps = ProblemSolver(tested)
r = Viewer(ps)
r.loadObstacleModel(packageName, scene, "planning")
tested.setJointBounds("base_joint_xyz", [-10., 10, -10, 10, 0, 20])
# (The odd capitalisation is the actual spelling of the hpp IDL method.)
ps.client.problem.selectConFigurationShooter("RbprmShooter")
q_init = tested.getCurrentConfig()
q_init[0:3] = [-10, -0.82, 1.25]
tested.setCurrentConfig(q_init)
r(q_init)
q_goal = q_init[::]
q_goal[0:3] = [-9, -0.65, 1.25]
r(q_goal)
ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)
t = ps.solve()  # resolution time; exact format depends on the hpp version
# Sweep bounds for the follow-up analysis.
res = {}
x_start = 0
y_start = 0
x_max = 2
y_max = 1.64
iter_step = 0.01
# HRP2 RB-PRM setup (final variant): short sideways query near the ground.
# (Reconstructed: original newlines had been collapsed to spaces.)
# Relies on names defined elsewhere: rbprmBuilder, Viewer.
#~ rbprmBuilder.setNormalFilter('hrp2_lleg_rom', [0,0,1], 0.9)
#~ rbprmBuilder.setNormalFilter('hrp2_rleg_rom', [0,0,1], 0.9)
#~ rbprmBuilder.setNormalFilter('hyq_rhleg_rom', [0,0,1], 0.9)
rbprmBuilder.boundSO3([-0., 0, -1, 1, -1, 1])
#~ from hpp.corbaserver.rbprm. import ProblemSolver
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver(rbprmBuilder)
r = Viewer(ps)
q_init = rbprmBuilder.getCurrentConfig()
#~ q_init [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
q_init[0:3] = [-0.05, -0.82, 0.6]
rbprmBuilder.setCurrentConfig(q_init)
r(q_init)
#~ q_init [0:3] = [0.1, -0.82, 0.648702]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
#~ q_init [0:3] = [0, -0.63, 0.6]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
#~ q_init [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
q_goal = q_init[::]
#~ q_goal [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
q_goal[0:3] = [-0.5, -0.82, 0.55]
r(q_goal)
#~ q_goal [0:3] = [3, -0.82, 6]; r(q_goal)
#~ q_goal [0:3] = [1.2, -0.65, 1.1]; r (q_goal)
#~ ps.addPathOptimizer("GradientBased")
ps.addPathOptimizer("RandomShortcut")
ps.setInitialConfig(q_init)
ps.addGoalConfig(q_goal)