class GraspingTask:
    """
    Class for grasp learning.
    """

    def __init__(self, pub_marker=None):
        self.pub_marker = pub_marker
        self.listener = tf.TransformListener()

    def getMarkerPose(self, marker_id, wait=True, timeBack=None):
        try:
            marker_name = 'ar_marker_' + str(int(marker_id))
            if wait:
                self.listener.waitForTransform('torso_base', marker_name, rospy.Time.now(), rospy.Duration(4.0))
            if timeBack != None:
                time = rospy.Time.now() - rospy.Duration(timeBack)
            else:
                time = rospy.Time(0)
            jar_marker = self.listener.lookupTransform('torso_base', marker_name, time)
        except:
            return None
        return pm.fromTf(jar_marker)

    def getCameraPoseFake(self):
        return PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.6041471326857807, 0.7290707942893216, -0.24257326295862056, 0.21123501385869978), PyKDL.Vector(0.274115, -0.000762625, 1.67876))    # romoco_02
#        return PyKDL.Frame(PyKDL.Rotation.RotY(135.0/180.0*math.pi), PyKDL.Vector(0.4, 0, 1.4)) * PyKDL.Frame(PyKDL.Rotation.RotZ(-90.0/180.0*math.pi))

    def getMarkerPoseFake(self, marker_id, wait=True, timeBack=None):
        if True:    # romoco_02
            ar_track = [
                [5,0,0,0,0,-0.168262043609,0.0930374127131,1.01813819229,0.950324481028,-0.0289575235949,0.00394574675416,0.309885904275],
                [21,0,0,0,0,0.082505708292,0.110761122122,0.603730984018,-0.458504669821,-0.709444231601,-0.0397255592372,0.533745472997],
                [30,0,0,0,0,0.104154326153,0.0757350473769,0.587008320764,0.861332293804,0.391007810313,-0.113641433205,0.303817702879],
                [18,0,0,0,0,0.124413927856,0.111452725921,0.599775167445,0.696719708925,0.480876853947,0.5293727524,-0.0557098514612],
                [19,0,0,0,0,0.123152725863,0.198755505957,0.716141316519,0.700134532901,0.471859046812,0.532229610902,-0.0623884369125],
                [22,0,0,0,0,0.0797242701158,0.196635593195,0.713926653472,-0.448409835388,-0.710363705627,-0.044302175627,0.540693390462],
            ]
            for mk in ar_track:
                if mk[0] == marker_id:
                    return self.getCameraPoseFake() * PyKDL.Frame(PyKDL.Rotation.Quaternion(mk[8], mk[9], mk[10], mk[11]), PyKDL.Vector(mk[5], mk[6], mk[7]))
            return None

#        T_B_Tm = PyKDL.Frame( PyKDL.Rotation.EulerZYZ(0.1, -0.1, 0.0), PyKDL.Vector(0.55,-0.4,0.9) )
        T_B_Tm = PyKDL.Frame(PyKDL.Rotation.RotZ(90.0/180.0*math.pi), PyKDL.Vector(0.55, -0.2, 0.9))
        T_B_Tbb = PyKDL.Frame(PyKDL.Vector(0.5, -0.8, 2.0))
        T_Tm_Bm = PyKDL.Frame(PyKDL.Vector(-0.06, 0.3, 0.135))
        T_Bm_Gm = PyKDL.Frame(PyKDL.Rotation.RotZ(90.0/180.*math.pi) * PyKDL.Rotation.RotY(90.0/180.*math.pi), PyKDL.Vector(0.0, 0.0, 0.17))    # the block standing on the table
#        T_Bm_Gm = PyKDL.Frame( PyKDL.Rotation.RotZ(-30.0/180.*math.pi), PyKDL.Vector(0.1,-0.1,0.06) )    # the block lying on the table
#        T_Bm_Gm = PyKDL.Frame( PyKDL.Rotation.RotX(90.0/180.*math.pi), PyKDL.Vector(0.1,-0.1,0.06) )
        if marker_id == 6:
            return T_B_Tm
        elif marker_id == 7:
            return T_B_Tm * T_Tm_Bm
        elif marker_id == 8:
            return T_B_Tbb
        elif marker_id == 19:
#            return T_B_Tm * T_Tm_Bm * T_Bm_Gm
            return T_B_Tm * T_Bm_Gm
        elif marker_id == 35:
            return T_B_Tm * T_Tm_Bm * T_Bm_Gm
        return None

    def getCameraPose(self):
        return pm.fromTf(self.listener.lookupTransform('torso_base', 'camera', rospy.Time(0)))

    def poseUpdaterThread(self, args, *args2):
        index = 0
        z_limit = 0.3
        while not rospy.is_shutdown():
            rospy.sleep(0.1)
            if self.allow_update_objects_pose == None or not self.allow_update_objects_pose:
                continue
            for obj_name in self.dyn_obj_markers:
                obj = self.dyn_obj_markers[obj_name]
                visible_markers_Br_Co = []
                visible_markers_weights_ori = []
                visible_markers_weights_pos = []
                visible_markers_idx = []
                for marker in obj:    #.markers:
                    T_Br_M = self.getMarkerPose(marker[0], wait=False, timeBack=0.3)
                    if T_Br_M != None and self.velma != None:
                        T_B_C = self.getCameraPose()
                        T_C_M = T_B_C.Inverse() * T_Br_M
                        v = T_C_M * PyKDL.Vector(0, 0, 1) - T_C_M * PyKDL.Vector()
                        if v.z() > -z_limit:
                            continue
                        # v.z() is in range (-1.0, -0.3)
                        weight = ((-v.z()) - z_limit) / (1.0 - z_limit)
                        if weight > 1.0 or weight < 0.0:
                            print "error: weight==%s"%(weight)
                        T_Co_M = marker[1]
                        T_Br_Co = T_Br_M * T_Co_M.Inverse()
                        visible_markers_Br_Co.append(T_Br_Co)
                        visible_markers_weights_ori.append(1.0 - weight)
                        visible_markers_weights_pos.append(weight)
                        visible_markers_idx.append(marker[0])
                if len(visible_markers_Br_Co) > 0:
#                    if obj.name == "object":
#                        print "vis: %s"%(visible_markers_idx)
#                        print "w_o: %s"%(visible_markers_weights_ori)
#                        print "w_p: %s"%(visible_markers_weights_pos)
                    # first calculate mean pose without weights
                    R_B_Co = velmautils.meanOrientation(visible_markers_Br_Co)[1]
                    p_B_Co = velmautils.meanPosition(visible_markers_Br_Co)
                    T_B_Co = PyKDL.Frame(copy.deepcopy(R_B_Co.M), copy.deepcopy(p_B_Co))
                    distances = []
                    for m_idx in range(0, len(visible_markers_Br_Co)):
                        diff = PyKDL.diff(T_B_Co, visible_markers_Br_Co[m_idx])
                        distances.append([diff, m_idx])
                    Br_Co_list = []
                    weights_ori = []
                    weights_pos = []
                    for d in distances:
                        if d[0].vel.Norm() > 0.04 or d[0].rot.Norm() > 15.0/180.0*math.pi:
                            continue
                        Br_Co_list.append(visible_markers_Br_Co[d[1]])
                        weights_ori.append(visible_markers_weights_ori[d[1]])
                        weights_pos.append(visible_markers_weights_pos[d[1]])
                    if len(Br_Co_list) > 0:
                        R_B_Co = velmautils.meanOrientation(Br_Co_list, weights=weights_ori)[1]
                        p_B_Co = velmautils.meanPosition(Br_Co_list, weights=weights_pos)
#                        obj.updatePose( PyKDL.Frame(copy.deepcopy(R_B_Co.M), copy.deepcopy(p_B_Co)) )
                        self.openrave.updatePose(obj_name, T_Br_Co)
            index += 1
            if index >= 100:
                index = 0

    def allowUpdateObjects(self):
        self.allow_update_objects_pose = True

    def disallowUpdateObjects(self):
        self.allow_update_objects_pose = False

    def waitForOpenraveInit(self):
        while not rospy.is_shutdown():
            if self.openrave.rolling:
                break
            rospy.sleep(0.5)

    def switchToJoint(self, robot):
        if robot.isCartesianImpedanceActive():
            raw_input("Press Enter to enable joint impedance...")
            if robot.checkStopCondition():
                exit(0)
            robot.switchToJoint()
        elif robot.isJointImpedanceActive():
            pass
        else:
            print "FATAL ERROR: impedance control in unknown state: %s %s"%(robot.joint_impedance_active, robot.cartesian_impedance_active)
            exit(0)

    def switchToCartesian(self, robot):
        if robot.isJointImpedanceActive():
            raw_input("Press Enter to enable cartesian impedance...")
            if robot.checkStopCondition():
                exit(0)
            robot.switchToCart()
        elif robot.isCartesianImpedanceActive():
            pass
        else:
            print "FATAL ERROR: impedance control in unknown state: %s %s"%(robot.joint_impedance_active, robot.cartesian_impedance_active)
            exit(0)

    def calculateWrenchesForTransportTask(self, ext_wrenches_W, transport_T_B_O):
        ext_wrenches_O = []
        for i in range(len(transport_T_B_O)-1):
            diff_B_O = PyKDL.diff(transport_T_B_O[i], transport_T_B_O[i+1])
            # simulate object motion and calculate expected wrenches
            for t in np.linspace(0.0, 1.0, 5):
                T_B_Osim = PyKDL.addDelta(transport_T_B_O[i], diff_B_O, t)
                T_Osim_B = T_B_Osim.Inverse()
                for ewr in ext_wrenches_W:
                    ext_wrenches_O.append(PyKDL.Frame(T_Osim_B.M) * ewr)
        return ext_wrenches_O
    def showPlan(self, plan):
        init_config = self.openrave.getRobotConfigurationRos()
        init_T_B_O = None
        grasped = False
        for stage in plan:
            if stage[0] == "move_joint":
                traj = stage[1]
                raw_input("Press Enter to visualize the joint trajectory...")
                duration = math.fsum(traj[3])
                joint_names = []
                for qi in range(7):
                    joint_names.append("right_arm_"+str(qi)+"_joint")
                self.openrave.showTrajectory(joint_names, duration * 5.0, traj[4])
#                self.openrave.updateRobotConfiguration(qar=traj[0][-1])
                qar = {}
                for qi in range(len(traj[0][-1])):
                    qar["right_arm_"+str(qi)+"_joint"] = traj[0][-1][qi]
                self.openrave.updateRobotConfigurationRos(qar)
            elif stage[0] == "grasp":
                graspable_object_name = stage[1]
#                grasp = stage[2]
                if init_T_B_O == None:
                    init_T_B_O = self.openrave.getPose(graspable_object_name)
                grasped = True
                self.openrave.grab(graspable_object_name)
            elif stage[0] == "move_cart":
                T_B_Wd_traj = stage[1]
                for idx in range(len(T_B_Wd_traj[0])):
                    init_js = self.openrave.getRobotConfigurationRos()
                    traj = self.velma_solvers.getCartImpWristTraj(init_js, T_B_Wd_traj[0][idx])
                    raw_input("Press Enter to visualize the cartesian trajectory...")
                    joint_names = []
                    for qi in range(7):
                        joint_names.append("right_arm_"+str(qi)+"_joint")
                    self.openrave.showTrajectory(joint_names, T_B_Wd_traj[1][idx], traj)
#                    self.openrave.updateRobotConfiguration(qar=traj[-1])
                    qar = {}
                    for qi in range(len(traj[-1])):
                        qar["right_arm_"+str(qi)+"_joint"] = traj[-1][qi]
                    self.openrave.updateRobotConfigurationRos(qar)
            elif stage[0] == "move_gripper":
                g_shape = stage[1]
                self.openrave.updateRobotConfigurationRos(g_shape)
        if grasped:
            self.openrave.release()
        if init_T_B_O != None:
            self.openrave.updatePose(graspable_object_name, init_T_B_O)
        self.openrave.updateRobotConfigurationRos(init_config)

    def executePlan(self, plan, time_mult):
        for stage in plan:
            print stage[0]
            if stage[0] == "move_joint":
                traj = stage[1]
                print "switching to joint impedance..."
                self.velma.switchToJoint()
                print "done."
                duration = math.fsum(traj[3])
                print "trajectory len: %s"%(len(traj[0]))
                raw_input("Press Enter to execute the trajectory on real robot in " + str(duration * time_mult) + "s ...")
                if self.velma.checkStopCondition():
                    exit(0)
                self.velma.moveWristTrajJoint(traj, time_mult, Wrench(Vector3(20,20,20), Vector3(4,4,4)))
                if self.velma.checkStopCondition(duration * time_mult + 1.0):
                    exit(0)
            elif stage[0] == "grasp":
                pass
#                graspable_object_name = stage[1]
#                grasp = stage[2]
#                if init_T_B_O == None:
#                    init_T_B_O = self.openrave.getPose(graspable_object_name)
#                grasped = True
#                self.openrave.grab(graspable_object_name)
            elif stage[0] == "move_cart":
                self.velma.switchToCart()
                # move to the desired position
                self.velma.updateTransformations()
                traj = stage[1]
                print traj
#                T_B_Wd = stage[1]
#                duration = self.velma.getMovementTime(T_B_Wd, max_v_l=0.1, max_v_r=0.2)
                raw_input("Press Enter to move the robot in " + str(traj[1][-1]) + " s...")
                if self.velma.checkStopCondition():
                    exit(0)
                self.velma.moveWristTraj(traj[0], traj[1], Wrench(Vector3(20,20,20), Vector3(4,4,4)), abort_on_q5_singularity=True, abort_on_q5_q6_self_collision=True)
                if self.velma.checkStopCondition(traj[1][-1]):
                    break
            elif stage[0] == "move_gripper":
                g_shape = stage[1]
                q = [
                    g_shape["right_HandFingerOneKnuckleTwoJoint"],
                    g_shape["right_HandFingerTwoKnuckleTwoJoint"],
                    g_shape["right_HandFingerThreeKnuckleTwoJoint"],
                    g_shape["right_HandFingerOneKnuckleOneJoint"],
                ]
                raw_input("Press Enter to change the gripper configuration...")
                self.velma.move_hand_client(q, t=(3000.0, 3000.0, 3000.0, 3000.0))

    def makePlan(self, graspable_object_name, grasp, transport_T_B_O, penalty_threshold):
        if self.openrave.checkRobotCollision(print_report=True):
            print "makePlan failed: robot is in collision with environment"
            return None, None

        plan_ret = []

        config = self.openrave.getRobotConfigurationRos()
        init_T_B_O = self.openrave.getPose(graspable_object_name)

        T_B_Ed = self.openrave.getGraspTransform(graspable_object_name, grasp, collisionfree=True)

        # set the gripper preshape
        hand_config, contacts_ret_O, normals_O = self.openrave.getFinalConfig(graspable_object_name, grasp, show=False)
        pre_angle = 30.0/180.0*math.pi
        preshape = {
            "right_HandFingerOneKnuckleTwoJoint" : hand_config[0] - pre_angle,
            "right_HandFingerTwoKnuckleTwoJoint" : hand_config[1] - pre_angle,
            "right_HandFingerThreeKnuckleTwoJoint" : hand_config[2] - pre_angle,
            "right_HandFingerOneKnuckleOneJoint" : hand_config[3]}

        self.openrave.updateRobotConfigurationRos(preshape)

        if self.openrave.checkRobotCollision(print_report=True):
            print "makePlan failed: robot is in collision with environment after gripper preshape execution"
            return None, None

        plan_ret.append(["move_gripper", preshape])

        # plan first trajectory (in configuration space)
        self.openrave.extendAllObjects(0.02)
        traj = self.openrave.planMoveForRightArm(T_B_Ed, None, penalty_threshold=penalty_threshold)
        self.openrave.restoreExtendedObjects()
        if traj == None:
            print "could not plan trajectory in configuration space"
            return None, None

        plan_ret.append(["move_joint", traj])

        penalty = traj[5]

        # start planning from the end of the previous trajectory
#        self.openrave.updateRobotConfiguration(qar=traj[0][-1])
        qar = {}
        for qi in range(len(traj[0][-1])):
            qar["right_arm_"+str(qi)+"_joint"] = traj[0][-1][qi]
        self.openrave.updateRobotConfigurationRos(qar)

        # grab the body
        self.openrave.grab(graspable_object_name)

        g_angle = 10.0/180.0*math.pi
        g_shape = {
            "right_HandFingerOneKnuckleTwoJoint" : hand_config[0] + g_angle,
            "right_HandFingerTwoKnuckleTwoJoint" : hand_config[1] + g_angle,
            "right_HandFingerThreeKnuckleTwoJoint" : hand_config[2] + g_angle,
            "right_HandFingerOneKnuckleOneJoint" : hand_config[3]}

        plan_ret.append(["move_gripper", g_shape])
        plan_ret.append(["grasp", graspable_object_name])

        # calculate the destination pose of the end effector
        T_B_O = self.openrave.getPose(graspable_object_name)
        T_E_O = T_B_Ed.Inverse() * T_B_O

        cart_traj = []
        cart_times = []
        T_B_Wd1 = T_B_Ed * self.velma.T_E_W
        print "makePlan:"
        print T_B_O
#        print T_B_Wd1
        time = 0.0
        for idx in range(1, len(transport_T_B_O)):
            T_B_Ed = transport_T_B_O[idx] * T_E_O.Inverse()
            T_B_Wd2 = T_B_Ed * self.velma.T_E_W
#            print T_B_Wd2
            print transport_T_B_O[idx]
            cart_traj.append(T_B_Wd2)
            time += self.velma.getMovementTime2(T_B_Wd1, T_B_Wd2, max_v_l=0.1, max_v_r=0.2)
            cart_times.append(time)
            T_B_Wd1 = T_B_Wd2

        # calculate the destination pose of the end effector
#        T_B_O = self.openrave.getPose(graspable_object_name)
#        T_E_O = T_B_Ed.Inverse() * T_B_O
#        T_B_Ed = T_B_Od * T_E_O.Inverse()
#        T_B_Wd = T_B_Ed * self.velma.T_E_W

        # interpolate trajectory for the second motion (in the cartesian space)
        for idx in range(len(cart_traj)):
            init_js = self.openrave.getRobotConfigurationRos()
            traj = self.velma_solvers.getCartImpWristTraj(init_js, cart_traj[idx])
            if traj == None:
                print "could not plan trajectory in cartesian space: ", str(idx)
                self.openrave.release()
                self.openrave.updatePose(graspable_object_name, init_T_B_O)
                self.openrave.updateRobotConfigurationRos(config)
                return None, None
            # start planning from the end of the previous trajectory
#            self.openrave.updateRobotConfiguration(qar=traj[-1])
            qar = {}
            for qi in range(len(traj[-1])):
                qar["right_arm_"+str(qi)+"_joint"] = traj[-1][qi]
            self.openrave.updateRobotConfigurationRos(qar)

        plan_ret.append(["move_cart", [cart_traj, cart_times]])

        self.openrave.release()
        self.openrave.updatePose(graspable_object_name, init_T_B_O)
        self.openrave.updateRobotConfigurationRos(config)

        return penalty, plan_ret
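    # Plan representation shared by makePlan(), showPlan() and executePlan():
    # a plan is a list of stages, each a list whose first element is a tag:
    #   ["move_gripper", joint_dict]           - gripper (pre)shape as a joint-name -> angle dict
    #   ["move_joint", traj]                   - joint-space trajectory returned by planMoveForRightArm()
    #   ["grasp", graspable_object_name]       - showPlan() grabs the object in OpenRAVE; executePlan() ignores it
    #   ["move_cart", [cart_traj, cart_times]] - wrist frames T_B_Wd with cumulative times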
    def spin(self):
        graspable_object_name = "big_box"

        # get an instance of RosPack with the default search paths
        rospack = rospkg.RosPack()
        # list all packages, equivalent to rospack list
        #rospack.list_pkgs()
        # get the file path for rospy_tutorials
        filename_environment = rospack.get_path('velma_scripts') + '/data/romoco/romoco.env.xml'
        filename_objectmarker = rospack.get_path('velma_scripts') + '/data/romoco/object_marker.txt'
        filename_wrenches = rospack.get_path('velma_scripts') + '/data/romoco/wrenches_' + graspable_object_name + '.txt'

        simulation_only = True
        if simulation_only:
            time_mult = 5.0
        else:
            time_mult = 20.0
        m_id = 0

        self.pub_marker.eraseMarkers(0, 3000, frame_id='world')

        #
        # Initialise Openrave
        #
        rospack = rospkg.RosPack()
        self.openrave = openraveinstance.OpenraveInstance()
        self.openrave.startOpenraveURDF(env_file=filename_environment, collision='ode')
        self.openrave.readRobot(xacro_uri=rospack.get_path('velma_description') + '/robots/velma.urdf.xacro', srdf_uri=rospack.get_path('velma_description') + '/robots/velma.srdf')
#        self.openrave.startOpenrave(filename_environment)

        self.openrave.setCamera(PyKDL.Vector(2.0, 0.0, 2.0), PyKDL.Vector(0.60, 0.0, 1.10))

        vertices, faces = surfaceutils.readStl(rospack.get_path('velma_scripts')+"/data/romoco/big_box_ascii.stl", scale=1.0)
        print vertices
        print faces
        self.openrave.addTrimesh("big_box", vertices, faces)

        #
        # Initialise dynamic objects and their marker information
        #
        dyn_objects_map = set()
        dyn_objects_map.add("table")
        dyn_objects_map.add("big_box")

        self.dyn_obj_markers = {}

        with open(filename_objectmarker, 'r') as f:
            for line in f:
                line_s = line.split()
                obj_name = line_s[0]
                markers_count = int(line_s[1])
                if obj_name in dyn_objects_map:
                    self.dyn_obj_markers[obj_name] = []
                    for i in range(markers_count):
                        marker_id = int(line_s[2+i*8+0])
                        frame = PyKDL.Frame(
                            PyKDL.Rotation.Quaternion(float(line_s[2+i*8+1]), float(line_s[2+i*8+2]), float(line_s[2+i*8+3]), float(line_s[2+i*8+4])),
                            PyKDL.Vector(float(line_s[2+i*8+5]), float(line_s[2+i*8+6]), float(line_s[2+i*8+7])))
                        self.dyn_obj_markers[obj_name].append([marker_id, frame])

        # simulation
        if simulation_only:
            self.getMarkerPose = self.getMarkerPoseFake
            self.getCameraPose = self.getCameraPoseFake

        self.T_World_Br = PyKDL.Frame(PyKDL.Vector(0, 0, 0.1))

        self.velma_solvers = velmautils.VelmaSolvers()

        self.velma = None

        print "creating interface for Velma..."
        # create the interface for Velma robot
        self.velma = Velma()
        print "done."

        rospy.sleep(0.5)
        self.velma.updateTransformations()

#        T_W_T = self.velma.T_W_E * PyKDL.Frame(PyKDL.Vector(0,0,0.17))
#        print T_W_T.M.GetQuaternion()
#        print T_W_T.p
#        exit(0)

        self.openrave.updateRobotConfigurationRos(self.velma.js_pos)

        self.allowUpdateObjects()
        # start thread for updating objects' positions in openrave
        thread.start_new_thread(self.poseUpdaterThread, (None, 1))

        self.velma.updateTransformations()

        # TEST: moveWrist
#        T_B_Ed = PyKDL.Frame(PyKDL.Vector(0,0.0,0.4)) * self.openrave.getLinkPose("right_HandPalmLink")
#        T_B_Wd = T_B_Ed * self.velma.T_E_W
#        init_js = self.openrave.getRobotConfigurationRos()
#        self.velma.switchToCart()
#        self.velma.moveTool(PyKDL.Frame(PyKDL.Vector(0,0,-0.3)), 2, stamp=None)
#        rospy.sleep(2)
#        self.velma.moveWrist(T_B_Wd, 4, Wrench(Vector3(20,20,20), Vector3(4,4,4)))
#        exit(0)

        k_pregrasp = Wrench(Vector3(1000.0, 1000.0, 1000.0), Vector3(300.0, 300.0, 300.0))
        k_grasp = Wrench(Vector3(500.0, 500.0, 500.0), Vector3(150.0, 150.0, 150.0))

        # reset the gripper
        self.velma.resetFingers()
        self.velma.calibrateTactileSensors()
        self.velma.setMedianFilter(8)

        raw_input("Press Enter to enable cartesian impedance...")
        if self.velma.checkStopCondition():
            exit(0)
        self.velma.switchToCart()

        # start with very low stiffness
        print "setting stiffness to very low value"
        self.velma.moveImpedance(self.velma.k_error, 0.5)
        if self.velma.checkStopCondition(0.5):
            exit(0)

        raw_input("Press Enter to continue...")
        if self.velma.checkStopCondition():
            exit(0)

        self.velma.updateTransformations()
#        self.velma.updateAndMoveTool( PyKDL.Frame(), 5.0 )
        self.velma.updateAndMoveTool(self.velma.T_W_E * PyKDL.Frame(PyKDL.Vector(0, 0, 0.17)), 5.0)
        if self.velma.checkStopCondition(6.0):
            exit(0)

        raw_input("Press Enter to continue...")
        print "setting stiffness to bigger value"
        self.velma.moveImpedance(k_pregrasp, 3.0)
        if self.velma.checkStopCondition(3.0):
            exit(0)

        self.velma.updateTransformations()

        # TEST: planning
        if False:
            self.openrave.updateRobotConfigurationRos(self.velma.js_pos)
            init_T_B_E = self.velma.T_B_W * self.velma.T_W_E
            T_B_Ed = init_T_B_E * PyKDL.Frame(PyKDL.Vector(0.1, 0.1, 0))
            # plan first trajectory (in configuration space)
            traj = self.openrave.planMoveForRightArm(T_B_Ed, None)
            if traj == None:
                print "could not plan trajectory in configuration space"
                return None, None
            plan = []
            plan.append(["move_joint", traj])
            # calculate the destination pose of the end effector
            T_B_Ed = init_T_B_E
            T_B_Wd = T_B_Ed * self.velma.T_E_W
            # interpolate trajectory for the second motion (in the cartesian space)
            plan.append(["move_cart", T_B_Wd])
            self.showPlan(plan)
            print "executing plan..."
            self.executePlan(plan, time_mult)
            exit(0)

        self.disallowUpdateObjects()

        self.openrave.prepareGraspingModule(graspable_object_name, force_load=True)

        try:
            print "trying to read wrenches for each grasp from file"
            self.openrave.loadWrenchesforAllGrasps(graspable_object_name, filename_wrenches)
            print "done."
        except IOError as e:
            print "could not read from file:"
            print e
            print "generating grasping data..."
            self.openrave.generateWrenchesforAllGrasps(graspable_object_name)
            print "done."
            print "saving grasping data to file..."
            self.openrave.saveWrenchesforAllGrasps(graspable_object_name, filename_wrenches)
            print "done."
        # TEST
#        T_B_Ed = PyKDL.Frame(PyKDL.Vector(0,0,0.2)) * self.openrave.getLinkPose("right_HandPalmLink")
#        self.openrave.getGraspsForObjectTransport(graspable_object_name, [PyKDL.Frame(PyKDL.Rotation.RotZ(90.0/180.0*math.pi), PyKDL.Vector(0.5,-0.1,1.0))])
#        traj = self.openrave.planMoveForRightArm(T_B_Ed, None)
#        if traj == None:
#            print "could not plan trajectory"
#            exit(0)
#        duration = math.fsum(traj[3])
#        raw_input("Press Enter to visualize the trajectory...")
#        if self.velma.checkStopCondition():
#            exit(0)
#        self.openrave.showTrajectory(duration * time_mult * 1, qar_list=traj[4])
#        raw_input(".")
#        exit(0)

        #
        # transport task specification
        #

#        task_variant = "liftup"
        task_variant = "rot"

        # current object pose
        current_T_B_O = self.openrave.getPose(graspable_object_name)

#        current_T_B_O = current_T_B_O * PyKDL.Frame(PyKDL.Rotation.RotX(45.0/180.0*math.pi))
        #self.openrave.updatePose(graspable_object_name, current_T_B_O)

        if task_variant == "liftup":
            # object destination poses
            T_B_O_trans = PyKDL.Frame(PyKDL.Vector(0, 0, 0.1)) * current_T_B_O
            transport_T_B_O = []
            transport_T_B_O.append(current_T_B_O)
            transport_T_B_O.append(T_B_O_trans)
        elif task_variant == "rot":
            # object destination poses
            T_B_O_trans = PyKDL.Frame(PyKDL.Vector(0, 0, 0.05)) * current_T_B_O
            TR_B_O_rot = (PyKDL.Frame(PyKDL.Rotation.RotY(90.0/180.0*math.pi)) * current_T_B_O).M
            TT_B_O_rot = current_T_B_O.p + PyKDL.Vector(0, 0, 0.1)
            T_B_O_rot = PyKDL.Frame(TR_B_O_rot, TT_B_O_rot)
            transport_T_B_O = []
            transport_T_B_O.append(current_T_B_O)
            transport_T_B_O.append(T_B_O_trans)
            transport_T_B_O.append(T_B_O_rot)
        else:
            print "wrong task: ", task_variant
            exit(0)

        print "transport_T_B_O:"
        print transport_T_B_O

        #
        # definition of the expected external wrenches for the lift-up task, applied at the object's c.o.m. in the world frame
        #
        ext_wrenches_W = []
        # main force (downward)
        ext_wrenches_W.append(PyKDL.Wrench(PyKDL.Vector(0, 0, -1), PyKDL.Vector(0, 0, 0)))
        # disturbance forces
#        ext_wrenches_W.append(PyKDL.Wrench(PyKDL.Vector(0,0,0.1), PyKDL.Vector()))
#        ext_wrenches_W.append(PyKDL.Wrench(PyKDL.Vector(0,0.1,0), PyKDL.Vector()))
#        ext_wrenches_W.append(PyKDL.Wrench(PyKDL.Vector(0,-0.1,0), PyKDL.Vector()))
#        ext_wrenches_W.append(PyKDL.Wrench(PyKDL.Vector(0.1,0,0), PyKDL.Vector()))
#        ext_wrenches_W.append(PyKDL.Wrench(PyKDL.Vector(-0.1,0,0), PyKDL.Vector()))
        # disturbance torques
        max_torque = 0.15
#        ext_wrenches_W.append(PyKDL.Wrench(PyKDL.Vector(), PyKDL.Vector(max_torque*0.1, 0, 0)))
#        ext_wrenches_W.append(PyKDL.Wrench(PyKDL.Vector(), PyKDL.Vector(-max_torque*0.1, 0, 0)))
#        ext_wrenches_W.append(PyKDL.Wrench(PyKDL.Vector(), PyKDL.Vector(0, max_torque*0.1, 0)))
#        ext_wrenches_W.append(PyKDL.Wrench(PyKDL.Vector(), PyKDL.Vector(0, -max_torque*0.1, 0)))
#        ext_wrenches_W.append(PyKDL.Wrench(PyKDL.Vector(), PyKDL.Vector(0, 0, max_torque*0.1)))
#        ext_wrenches_W.append(PyKDL.Wrench(PyKDL.Vector(), PyKDL.Vector(0, 0, -max_torque*0.1)))

        ext_wrenches_O = self.calculateWrenchesForTransportTask(ext_wrenches_W, transport_T_B_O)
        print ext_wrenches_O

        # TEST: visualise the quality of all grasps
        if False:
            m_id = 0
            m_id = self.pub_marker.publishSinglePointMarker(PyKDL.Vector(), m_id, r=0.2, g=0.2, b=0.2, namespace='default', frame_id='torso_base', m_type=Marker.CUBE, scale=Vector3(0.177*2, 0.03*2, 0.03*2), T=current_T_B_O)
            print "generating GWS for all grasps..."
            self.openrave.generateGWSforAllGrasps(graspable_object_name)
            for grasp_idx in range(self.openrave.getGraspsCount(graspable_object_name)):
                gws = self.openrave.getGWSforGraspId(graspable_object_name, grasp_idx)
                q_min = None
                for wr_O in ext_wrenches_O:
                    q = self.openrave.getQualityMeasure2(gws, wr_O)
                    if q_min == None or q < q_min:
                        q_min = q
                grasp = self.openrave.getGrasp(graspable_object_name, grasp_idx)
                T_B_E = self.openrave.getGraspTransform(graspable_object_name, grasp)
                scale = q_min * 0.1
                m_id = self.pub_marker.publishSinglePointMarker(T_B_E * PyKDL.Vector(), m_id, r=0.6, g=0.6, b=0.6, namespace='default', frame_id='torso_base', m_type=Marker.SPHERE, scale=Vector3(scale, scale, scale), T=None)
            raw_input(".")
            exit(0)

        print "calculating set of possible grasps..."
        valid_indices = self.openrave.getGraspsForObjectTransport(graspable_object_name, transport_T_B_O)
        print "done"

        print "calculating quality measure..."
        evaluated_grasps = []
        for grasp_idx in valid_indices:
            gws = self.openrave.getGWSforGraspId(graspable_object_name, grasp_idx)
            q_min = None
            for wr_O in ext_wrenches_O:
                q = self.openrave.getQualityMeasure2(gws, wr_O)
                if q_min == None or q < q_min:
                    q_min = q
            evaluated_grasps.append([q_min, grasp_idx])
        print "done."

        evaluated_grasps_sorted = sorted(evaluated_grasps, key=operator.itemgetter(0), reverse=True)

        # show grasps sorted by their scores
#        for q, grasp_idx in evaluated_grasps_sorted:
#            print "showing the grasp..."
#            print q
#            grasp = self.openrave.getGrasp(graspable_object_name, grasp_idx)
#            self.openrave.getFinalConfig(graspable_object_name, grasp, show=True)

#        print "showing the grasp..."
#        self.openrave.getFinalConfig(graspable_object_name, grasp, show=True)

        q_max = evaluated_grasps_sorted[0][0]
        print "max quality: %s"%(q_max)
        print "len(evaluated_grasps_sorted)", len(evaluated_grasps_sorted)

        evaluated_plans = []
        penalty_threshold = 1000.0
        while len(evaluated_grasps_sorted) > 0:
            best_grasp_q, best_grasp_idx = evaluated_grasps_sorted.pop(0)
            if best_grasp_q < 0.9 * q_max:
                print best_grasp_q
                break

            grasp = self.openrave.getGrasp(graspable_object_name, best_grasp_idx)

            penalty, plan = self.makePlan(graspable_object_name, grasp, transport_T_B_O, penalty_threshold)

            print best_grasp_q, penalty
            if penalty != None:
                penalty_threshold = penalty
                evaluated_plans.append([penalty, best_grasp_idx, plan])
                if penalty < 0.000001:
                    break

        evaluated_plans_sorted = sorted(evaluated_plans, key=operator.itemgetter(0))
        print "best plan: %s"%(evaluated_plans_sorted[0][0])

        self.showPlan(evaluated_plans_sorted[0][2])

        self.executePlan(evaluated_plans_sorted[0][2], time_mult)
#        grasp = self.openrave.getGrasp(graspable_object_name, evaluated_plans_sorted[0][1])
#        print "showing the grasp..."
#        self.openrave.getFinalConfig(graspable_object_name, grasp, show=True)

        exit(0)
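# Illustrative usage sketch: the node name and the velmautils.MarkerPublisher
# helper below are assumptions, not taken from this file. GraspingTask only
# requires an object exposing eraseMarkers() and publishSinglePointMarker()
# as pub_marker.
def run_grasping_task_example():
    rospy.init_node('grasp_task')
    pub_marker = velmautils.MarkerPublisher()    # assumed helper; replace with the publisher used in this package
    task = GraspingTask(pub_marker)
    task.spin()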
def spin(self):
    s = struct.Struct('f f f i B B B B i i i')
    s2 = struct.Struct('B B B B B B B B B B B B B B B B B B B B B B B B B B B B B B B B')

    rospack = rospkg.RosPack()
    env_file = rospack.get_path('velma_scripts') + '/data/jar/cabinet_jar.env.xml'
    srdf_path = rospack.get_path('velma_description') + '/robots/'
    object_name = 'jar'

    print "creating interface for Velma..."
    # create the interface for Velma robot
    velma = Velma()
    print "done."

    self.listener = tf.TransformListener()
    rospy.sleep(0.5)

    self.br = tf.TransformBroadcaster()

    self.point_cloud_pub = rospy.Publisher("/head_kinect/depth_registered/points", sensor_msgs.msg.PointCloud2, queue_size=100)
    self.point_cloud = sensor_msgs.msg.PointCloud2()
    self.point_cloud.header.frame_id = 'head_kinect_rgb_optical_frame'
    self.point_cloud.header.seq = 0
#    self.point_cloud.height = 480
#    self.point_cloud.width = 640
    self.point_cloud.height = 60
    self.point_cloud.width = 80
    self.point_cloud.fields.append(sensor_msgs.msg.PointField('x', 0, 7, 1))
    self.point_cloud.fields.append(sensor_msgs.msg.PointField('y', 4, 7, 1))
    self.point_cloud.fields.append(sensor_msgs.msg.PointField('z', 8, 7, 1))
    self.point_cloud.fields.append(sensor_msgs.msg.PointField('rgb', 16, 7, 1))
    self.point_cloud.is_bigendian = False
    self.point_cloud.point_step = 32
    self.point_cloud.row_step = self.point_cloud.point_step * self.point_cloud.width
    self.point_cloud.is_dense = False

    #
    # Initialise Openrave
    #
    openrave = openraveinstance.OpenraveInstance()
    openrave.startOpenrave(viewer=False)
#    openrave.runOctomapServer()
    openrave.loadEnv(env_file)
#    openrave.env.GetCollisionChecker().SetCollisionOptions(0)#4)
    collision_models_urdf = {
        "velmasimplified0" : ("velma_simplified.srdf", False, True, 0.0, False),
    }
    openrave.readRobot(srdf_path=srdf_path, collision_models_urdf=collision_models_urdf)
#    openrave.addMaskedObjectOctomap("velmasimplified0");

    while not rospy.is_shutdown():
        time_now, js = velma.getLastJointState()
        T_B_C = velma.fk_ik_solver.calculateFk('head_kinect_rgb_optical_frame', js)
        openrave.updateRobotConfigurationRos(js)
        rospy.sleep(0.1)
        time_tf = rospy.Time.now() - rospy.Duration(0.5)
        if self.listener.canTransform('world', 'torso_base', time_tf):
            pose = self.listener.lookupTransform('world', 'torso_base', time_tf)
        else:
            print 'cannot transform'
            continue

        T_W_B = pm.fromTf(pose)
        T_W_C = T_W_B * T_B_C
        T_C_W = T_W_C.Inverse()
        valid_points = 0
        self.point_cloud.data = []
        for y in range(self.point_cloud.height):
            fy = float(y)/float(self.point_cloud.height) - 0.5
            for x in range(self.point_cloud.width):
                fx = float(x)/float(self.point_cloud.width) - 0.5
                origin = T_W_C * PyKDL.Vector()
                d = T_W_C * PyKDL.Vector(fx, fy, 1.0) - origin
                d *= 4.0
                report = CollisionReport()
                ret = openrave.env.CheckCollision(Ray([origin.x(), origin.y(), origin.z()], [d.x(), d.y(), d.z()]), report)
                if len(report.contacts) > 0:
                    x_W, y_W, z_W = report.contacts[0].pos
                    p_C = T_C_W * PyKDL.Vector(x_W, y_W, z_W)
                    values = (p_C.x(), p_C.y(), p_C.z(), 0, 128, 128, 128, 255, 0, 0, 0)
                    packed_data = s.pack(*values)
                    unpacked = s2.unpack(packed_data)
                    for p in unpacked:
                        self.point_cloud.data.append(p)
                    valid_points += 1
                else:
                    for i in range(32):
                        self.point_cloud.data.append(255)

        self.point_cloud.header.seq += 1
        self.point_cloud.header.stamp = time_now
        self.point_cloud_pub.publish(self.point_cloud)

        T_W_J = openrave.getPoseW(object_name)
        T_msg = pm.toMsg(T_W_J)
        self.br.sendTransform([T_msg.position.x, T_msg.position.y, T_msg.position.z],
                              [T_msg.orientation.x, T_msg.orientation.y, T_msg.orientation.z, T_msg.orientation.w],
                              rospy.Time.now(), object_name, "world")
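# Each point written by the loop above occupies 32 bytes, matching the declared
# PointCloud2 fields: float32 x/y/z at offsets 0/4/8 and the 'rgb' field at
# offset 16, whose four bytes hold the grey colour values and 255; the rest is
# padding. The helper below is only an illustrative sketch of that layout (it is
# not used by the code above); it packs one grey point the same way the
# struct.Struct('f f f i B B B B i i i') instance does.
def _pack_grey_point(x, y, z):
    s = struct.Struct('f f f i B B B B i i i')
    # x, y, z (12 bytes), 4 bytes of padding, the four bytes of the 'rgb' field,
    # then 12 trailing padding bytes - 32 bytes in total (the point_step)
    return s.pack(x, y, z, 0, 128, 128, 128, 255, 0, 0, 0)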
def spin(self):
    s = struct.Struct('f f f i B B B B i i i')
    s2 = struct.Struct('B B B B B B B B B B B B B B B B B B B B B B B B B B B B B B B B')

    rospack = rospkg.RosPack()
    srdf_path = rospack.get_path('velma_description') + '/robots/'

    self.listener = tf.TransformListener()
#    rospy.sleep(0.5)

    print "creating interface for Velma..."
    # create the interface for Velma robot
    self.velma = Velma()
    print "done."

    rospy.Subscriber("/head_kinect/depth_registered/points", sensor_msgs.msg.PointCloud2, self.pc_callback)
    self.point_cloud_pub = rospy.Publisher("/pointcloud_filter/points", sensor_msgs.msg.PointCloud2, queue_size=100)
    pointcloud = sensor_msgs.msg.PointCloud2()

    #
    # Initialise Openrave
    #
    openrave = openraveinstance.OpenraveInstance()
    openrave.startOpenraveURDF(env_file=None, viewer=False, collision='fcl')
    collision_models_urdf = {
        "velmasimplified0" : ("velma_simplified.srdf", False, True, 0.0, False),
    }
    openrave.readRobot(srdf_path=srdf_path, collision_models_urdf=collision_models_urdf)

    sphere_probe = openrave.addSphere('sphere_probe', 0.1)

    while not rospy.is_shutdown():
        self.mutex.acquire()
        processed = self.processsed
        self.mutex.release()
        if not processed:
            time_stamp = self.pointcloud_orig.header.stamp
            if self.listener.canTransform('world', self.pointcloud_orig.header.frame_id, time_stamp):
                pose = self.listener.lookupTransform('world', self.pointcloud_orig.header.frame_id, time_stamp)
                T_W_C = pm.fromTf(pose)
                T_C_W = T_W_C.Inverse()
            else:
                print 'cannot transform'
                continue

            openrave.updateRobotConfigurationRos(self.velma.getJointStateAtTime(time_stamp))

            pointcloud.header = self.pointcloud_orig.header
            pointcloud.height = self.pointcloud_orig.height
            pointcloud.width = self.pointcloud_orig.width
            pointcloud.fields = self.pointcloud_orig.fields
            pointcloud.is_bigendian = self.pointcloud_orig.is_bigendian
            pointcloud.point_step = self.pointcloud_orig.point_step
            pointcloud.row_step = self.pointcloud_orig.row_step
            pointcloud.is_dense = self.pointcloud_orig.is_dense
#            print pointcloud.fields
            pointcloud.data = []
            for idx in range(0, len(self.pointcloud_orig.data), pointcloud.point_step):
                packed = self.pointcloud_orig.data[idx:(idx+pointcloud.point_step)]
                unpacked = s.unpack(packed)
                # skip points with NaN coordinates and copy them through unchanged
                if math.isnan(unpacked[0]) or math.isnan(unpacked[1]) or math.isnan(unpacked[2]):
                    unpacked2 = s2.unpack(packed)
                    for i in range(32):
                        pointcloud.data.append(unpacked2[i])
                    continue
                x, y, z = unpacked[0:3]
                pt_C = PyKDL.Vector(x, y, z)
                pt_W = T_W_C * pt_C
                sphere_probe.SetTransform(conv.KDLToOpenrave(PyKDL.Frame(pt_W)))
                report = CollisionReport()
                if openrave.env.CheckCollision(sphere_probe, report):
                    for i in range(32):
                        pointcloud.data.append(255)
                else:
                    unpacked2 = s2.unpack(packed)
                    for i in range(32):
                        pointcloud.data.append(unpacked2[i])

            self.point_cloud_pub.publish(pointcloud)

            self.mutex.acquire()
            self.processsed = True
            self.mutex.release()
        rospy.sleep(0.01)
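# The loop above relies on three attributes that are initialised elsewhere in
# this script and are not shown in this excerpt: self.mutex (a threading.Lock),
# self.pointcloud_orig (the most recently received cloud) and self.processsed
# (a done flag). A minimal sketch of the subscriber callback it registers
# (self.pc_callback) could look like this; the actual implementation may differ.
def pc_callback(self, data):
    # store the newest cloud and mark it as not yet processed
    self.mutex.acquire()
    self.pointcloud_orig = data
    self.processsed = False
    self.mutex.release()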
def spin(self): # test joint impedance controll if False: # create the robot interface for real hardware velma = Velma() print "created robot interface" rospy.sleep(1.0) velma.switchToJoint() print "current q: %s" % (velma.qar) q = copy.deepcopy(velma.qar) q[6] += 0.2 print "next q: %s" % (q) raw_input("Press Enter to move the robot in joint in 5s...") velma.moveWristJoint(q, 1, None) rospy.sleep(1) exit(0) simulation_only = False if simulation_only: time_mult = 5.0 else: time_mult = 10.0 m_id = 0 # create objects definitions obj_table = grip.GraspableObject("table", "box", [0.60, 0.85, 0.07]) # obj_table.addMarker( 6, PyKDL.Frame(PyKDL.Vector(0, -0.225, 0.035)) ) obj_table.addMarker(6, PyKDL.Frame(PyKDL.Vector(0, 0.12, 0.035))) obj_box = grip.GraspableObject("box", "box", [0.22, 0.24, 0.135]) obj_box.addMarker(7, PyKDL.Frame(PyKDL.Vector(-0.07, 0.085, 0.065))) obj_big_box = grip.GraspableObject("big_box", "box", [0.20, 0.20, 2.0]) obj_big_box.addMarker(8, PyKDL.Frame(PyKDL.Vector(0, 0, 1.0))) obj_wall_behind = grip.GraspableObject("wall_behind", "box", [0.20, 3.0, 3.0]) obj_wall_right = grip.GraspableObject("wall_right", "box", [3.0, 0.2, 3.0]) obj_ceiling = grip.GraspableObject("ceiling", "box", [3.0, 3.0, 0.2]) obj_grasp = grip.GraspableObject("object", "box", [0.354, 0.060, 0.060]) obj_grasp_frames_old = [ [ 18, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.0, 0.0, 0.0, 1.0), PyKDL.Vector(-0.0, -0.0, -0.0)) ], [ 19, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.00785118489648, -0.00136981350282, -0.000184602454162, 0.999968223709), PyKDL.Vector(0.14748831582, -0.00390004064458, 0.00494675382036)) ], [ 20, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.0108391070454, -0.00679278400361, -0.0154191552083, 0.999799290606), PyKDL.Vector(0.289969171073, -0.00729932931459, 0.00759828464719)) ], [ 21, PyKDL.Frame( PyKDL.Rotation.Quaternion(0.707914450157, 0.00553703354292, -0.0049088621984, 0.706259425134), PyKDL.Vector(-0.00333471065688, -0.0256403932819, -0.0358967610179)) ], [ 22, PyKDL.Frame( PyKDL.Rotation.Quaternion(0.711996124932, 0.000529252451241, -0.00578615630039, 0.702159353971), PyKDL.Vector(0.147443644368, -0.03209918445, -0.028549100504)) ], [ 23, PyKDL.Frame( PyKDL.Rotation.Quaternion(0.714618336612, -0.00917868744082, 0.000177822438207, 0.699454325209), PyKDL.Vector(0.29031370529, -0.0348959795876, -0.0263138015496)) ], [ 24, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.999814315554, -0.00730751409695, 0.00318617054665, 0.0175437444253), PyKDL.Vector(-0.00774666114837, 0.0127324931914, -0.0605032370936)) ], [ 25, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.999769709131, -0.00683690807754, 0.00565692317327, 0.0195393093955), PyKDL.Vector(0.143402769587, 0.00560941008048, -0.0682080677974)) ], [ 26, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.999702001968, 0.00436508873022, 0.00893993421014, 0.0222919455689), PyKDL.Vector(0.2867315755, 0.0037977729025, -0.0723254241133)) ], [ 27, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.718926115108, 0.0025958563067, 0.000863904789675, 0.695081114845), PyKDL.Vector(0.00685389266037, 0.041611313921, -0.0242848250842)) ], [ 28, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.723920159064, -0.00406580031329, -0.00237155614562, 0.689867703469), PyKDL.Vector(0.152973875805, 0.0480443334089, -0.0203619760073)) ], [ 29, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.730592084981, -0.0115053876764, -0.00159217841913, 0.682715384612), PyKDL.Vector(0.296627722109, 0.0526564873934, -0.0157362559562)) ], [ 30, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.0107101025933, -0.707578018883, 
-0.00676540180519, 0.706521670039), PyKDL.Vector(-0.0316984701649, 0.00141765295049, -0.0308603633287)) ], [ 31, PyKDL.Frame( PyKDL.Rotation.Quaternion(0.00385143207656, 0.706841586598, 0.00284731518612, 0.707355660699), PyKDL.Vector(0.319944660728, -0.00029327409029, -0.0292236368986)) ], ] obj_grasp_frames = [ [ 18, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.0, 0.0, 0.0, 1.0), PyKDL.Vector(-0.0, -0.0, -0.0)) ], [ 19, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.00165433105633, -0.00223969436551, 0.00783500583865, 0.999965429223), PyKDL.Vector(0.150364188592, 0.00540315928786, 0.00332539142516)) ], [ 20, PyKDL.Frame( PyKDL.Rotation.Quaternion(0.00288819470565, 0.000787875354111, -0.00584291384849, 0.999978448739), PyKDL.Vector(0.283704103524, 0.00072398461679, -0.00573581222652)) ], [ 21, PyKDL.Frame( PyKDL.Rotation.Quaternion(0.700515458788, 0.00669571919817, -0.000414650985238, 0.71360569463), PyKDL.Vector(0.00448758480338, -0.0246391219393, -0.0318239341873)) ], [ 22, PyKDL.Frame( PyKDL.Rotation.Quaternion(0.703637094621, 0.0128037540093, -0.00696099928093, 0.710410055845), PyKDL.Vector(0.147645037233, -0.0270235353887, -0.0319539994022)) ], [ 23, PyKDL.Frame( PyKDL.Rotation.Quaternion(0.705573146762, -0.0108101497697, 0.0078757141097, 0.708510866789), PyKDL.Vector(0.2869132353, -0.0311870916024, -0.0364408741191)) ], [ 24, PyKDL.Frame( PyKDL.Rotation.Quaternion(0.999936222986, -0.00719147497613, -0.00856953614561, 0.00154780136503), PyKDL.Vector(0.000967154696901, -0.000658291054497, -0.059361255947)) ], [ 25, PyKDL.Frame( PyKDL.Rotation.Quaternion(0.999925422492, 0.00698873688352, -0.00978855330626, 0.00211925234593), PyKDL.Vector(0.139811416338, -0.00107135691589, -0.0658641046354)) ], [ 26, PyKDL.Frame( PyKDL.Rotation.Quaternion(0.999573485418, 0.0127628877053, -0.0151291896644, 0.0214723907838), PyKDL.Vector(0.294537733385, 0.0266765305375, -0.0716188295568)) ], [ 27, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.715893512402, -0.00285901607723, 0.00295541105269, 0.698197372148), PyKDL.Vector(0.00499777040554, 0.0411443197242, -0.0229397580848)) ], [ 28, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.720365484604, -0.00848358345308, -0.00122745492272, 0.693541700807), PyKDL.Vector(0.153434321293, 0.0483251803469, -0.017733228985)) ], [ 29, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.730806278242, -0.012226144189, -0.000233600920018, 0.682475384546), PyKDL.Vector(0.299578008092, 0.0554137486219, -0.0115267264344)) ], [ 30, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.00153631145759, -0.704471851921, -0.0141334264319, 0.709589526314), PyKDL.Vector(-0.0328832398393, -0.000711552687509, -0.0280278186323)) ], [ 31, PyKDL.Frame( PyKDL.Rotation.Quaternion(-0.00648923236188, 0.70344139916, -0.0037097168268, 0.710713954987), PyKDL.Vector(0.320515478778, -0.000808849733968, -0.0336656231855)) ], ] T_M18_M19 = obj_grasp_frames[1][1] T_M19_Co = PyKDL.Frame(PyKDL.Vector(0, 0, -0.03)) T_M18_Co = T_M18_M19 * T_M19_Co T_Co_M18 = T_M18_Co.Inverse() for marker in obj_grasp_frames: T_M18_Mi = marker[1] obj_grasp.addMarker(marker[0], T_Co_M18 * T_M18_Mi) self.objects = [ obj_table, obj_box, obj_grasp, obj_wall_behind, obj_wall_right, obj_ceiling ] if False: grip.gripUnitTest(obj_grasp) exit(0) # load and init ik solver for right hand velma_ikr = velmautils.VelmaIkSolver() velma_ikr.initIkSolver() # simulation if simulation_only: self.getMarkerPose = self.getMarkerPoseFake # create Openrave interface self.openrave = openraveinstance.OpenraveInstance( PyKDL.Frame(PyKDL.Vector(0, 0, 0.1))) self.openrave.startNewThread() 
def spin(self): # test joint impedance controll if False: # create the robot interface for real hardware velma = Velma() print "created robot interface" rospy.sleep(1.0) velma.switchToJoint() print "current q: %s"%(velma.qar) q = copy.deepcopy(velma.qar) q[6] += 0.2 print "next q: %s"%(q) raw_input("Press Enter to move the robot in joint in 5s...") velma.moveWristJoint(q, 1, None) rospy.sleep(1) exit(0) simulation_only = False if simulation_only: time_mult = 5.0 else: time_mult = 10.0 m_id = 0 # create objects definitions obj_table = grip.GraspableObject("table", "box", [0.60,0.85,0.07]) # obj_table.addMarker( 6, PyKDL.Frame(PyKDL.Vector(0, -0.225, 0.035)) ) obj_table.addMarker( 6, PyKDL.Frame(PyKDL.Vector(0, 0.12, 0.035)) ) obj_box = grip.GraspableObject("box", "box", [0.22,0.24,0.135]) obj_box.addMarker( 7, PyKDL.Frame(PyKDL.Vector(-0.07, 0.085, 0.065)) ) obj_big_box = grip.GraspableObject("big_box", "box", [0.20,0.20,2.0]) obj_big_box.addMarker( 8, PyKDL.Frame(PyKDL.Vector(0, 0, 1.0)) ) obj_wall_behind = grip.GraspableObject("wall_behind", "box", [0.20,3.0,3.0]) obj_wall_right = grip.GraspableObject("wall_right", "box", [3.0,0.2,3.0]) obj_ceiling = grip.GraspableObject("ceiling", "box", [3.0,3.0,0.2]) obj_grasp = grip.GraspableObject("object", "box", [0.354, 0.060, 0.060]) obj_grasp_frames_old = [ [18, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.0,0.0,0.0,1.0),PyKDL.Vector(-0.0,-0.0,-0.0))], [19, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.00785118489648,-0.00136981350282,-0.000184602454162,0.999968223709),PyKDL.Vector(0.14748831582,-0.00390004064458,0.00494675382036))], [20, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.0108391070454,-0.00679278400361,-0.0154191552083,0.999799290606),PyKDL.Vector(0.289969171073,-0.00729932931459,0.00759828464719))], [21, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.707914450157,0.00553703354292,-0.0049088621984,0.706259425134),PyKDL.Vector(-0.00333471065688,-0.0256403932819,-0.0358967610179))], [22, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.711996124932,0.000529252451241,-0.00578615630039,0.702159353971),PyKDL.Vector(0.147443644368,-0.03209918445,-0.028549100504))], [23, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.714618336612,-0.00917868744082,0.000177822438207,0.699454325209),PyKDL.Vector(0.29031370529,-0.0348959795876,-0.0263138015496))], [24, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.999814315554,-0.00730751409695,0.00318617054665,0.0175437444253),PyKDL.Vector(-0.00774666114837,0.0127324931914,-0.0605032370936))], [25, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.999769709131,-0.00683690807754,0.00565692317327,0.0195393093955),PyKDL.Vector(0.143402769587,0.00560941008048,-0.0682080677974))], [26, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.999702001968,0.00436508873022,0.00893993421014,0.0222919455689),PyKDL.Vector(0.2867315755,0.0037977729025,-0.0723254241133))], [27, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.718926115108,0.0025958563067,0.000863904789675,0.695081114845),PyKDL.Vector(0.00685389266037,0.041611313921,-0.0242848250842))], [28, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.723920159064,-0.00406580031329,-0.00237155614562,0.689867703469),PyKDL.Vector(0.152973875805,0.0480443334089,-0.0203619760073))], [29, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.730592084981,-0.0115053876764,-0.00159217841913,0.682715384612),PyKDL.Vector(0.296627722109,0.0526564873934,-0.0157362559562))], [30, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.0107101025933,-0.707578018883,-0.00676540180519,0.706521670039),PyKDL.Vector(-0.0316984701649,0.00141765295049,-0.0308603633287))], [31, 
PyKDL.Frame(PyKDL.Rotation.Quaternion(0.00385143207656,0.706841586598,0.00284731518612,0.707355660699),PyKDL.Vector(0.319944660728,-0.00029327409029,-0.0292236368986))], ] obj_grasp_frames = [ [18, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.0,0.0,0.0,1.0),PyKDL.Vector(-0.0,-0.0,-0.0))], [19, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.00165433105633,-0.00223969436551,0.00783500583865,0.999965429223),PyKDL.Vector(0.150364188592,0.00540315928786,0.00332539142516))], [20, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.00288819470565,0.000787875354111,-0.00584291384849,0.999978448739),PyKDL.Vector(0.283704103524,0.00072398461679,-0.00573581222652))], [21, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.700515458788,0.00669571919817,-0.000414650985238,0.71360569463),PyKDL.Vector(0.00448758480338,-0.0246391219393,-0.0318239341873))], [22, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.703637094621,0.0128037540093,-0.00696099928093,0.710410055845),PyKDL.Vector(0.147645037233,-0.0270235353887,-0.0319539994022))], [23, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.705573146762,-0.0108101497697,0.0078757141097,0.708510866789),PyKDL.Vector(0.2869132353,-0.0311870916024,-0.0364408741191))], [24, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.999936222986,-0.00719147497613,-0.00856953614561,0.00154780136503),PyKDL.Vector(0.000967154696901,-0.000658291054497,-0.059361255947))], [25, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.999925422492,0.00698873688352,-0.00978855330626,0.00211925234593),PyKDL.Vector(0.139811416338,-0.00107135691589,-0.0658641046354))], [26, PyKDL.Frame(PyKDL.Rotation.Quaternion(0.999573485418,0.0127628877053,-0.0151291896644,0.0214723907838),PyKDL.Vector(0.294537733385,0.0266765305375,-0.0716188295568))], [27, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.715893512402,-0.00285901607723,0.00295541105269,0.698197372148),PyKDL.Vector(0.00499777040554,0.0411443197242,-0.0229397580848))], [28, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.720365484604,-0.00848358345308,-0.00122745492272,0.693541700807),PyKDL.Vector(0.153434321293,0.0483251803469,-0.017733228985))], [29, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.730806278242,-0.012226144189,-0.000233600920018,0.682475384546),PyKDL.Vector(0.299578008092,0.0554137486219,-0.0115267264344))], [30, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.00153631145759,-0.704471851921,-0.0141334264319,0.709589526314),PyKDL.Vector(-0.0328832398393,-0.000711552687509,-0.0280278186323))], [31, PyKDL.Frame(PyKDL.Rotation.Quaternion(-0.00648923236188,0.70344139916,-0.0037097168268,0.710713954987),PyKDL.Vector(0.320515478778,-0.000808849733968,-0.0336656231855))], ] T_M18_M19 = obj_grasp_frames[1][1] T_M19_Co = PyKDL.Frame(PyKDL.Vector(0,0,-0.03)) T_M18_Co = T_M18_M19 * T_M19_Co T_Co_M18 = T_M18_Co.Inverse() for marker in obj_grasp_frames: T_M18_Mi = marker[1] obj_grasp.addMarker(marker[0], T_Co_M18 * T_M18_Mi) self.objects = [obj_table, obj_box, obj_grasp, obj_wall_behind, obj_wall_right, obj_ceiling] if False: grip.gripUnitTest(obj_grasp) exit(0) # load and init ik solver for right hand velma_ikr = velmautils.VelmaIkSolver() velma_ikr.initIkSolver() # simulation if simulation_only: self.getMarkerPose = self.getMarkerPoseFake # create Openrave interface self.openrave = openraveinstance.OpenraveInstance(PyKDL.Frame(PyKDL.Vector(0,0,0.1))) self.openrave.startNewThread() self.waitForOpenraveInit() print "openrave initialised" for obj in self.objects: if obj.isBox(): self.openrave.addBox(obj.name, obj.size[0], obj.size[1], obj.size[2]) print "added objects" self.openrave.updatePose("wall_behind", 
PyKDL.Frame(PyKDL.Vector(-0.5,0,1.5)) ) self.openrave.updatePose("wall_right", PyKDL.Frame(PyKDL.Vector(0,-1.3,1.5)) ) self.openrave.updatePose("ceiling", PyKDL.Frame(PyKDL.Vector(0,0,2.3)) ) if False: index = 18 for fr in frames: print index m_id = self.pub_marker.publishFrameMarker(fr, m_id) raw_input("Press Enter to continue...") rospy.sleep(0.1) index += 1 rospy.sleep(2.0) exit(0) # unit test for surface sampling if False: vertices, indices = self.openrave.getMesh("object") velmautils.sampleMeshUnitTest(vertices, indices, self.pub_marker) self.allowUpdateObjects() # start thread for updating objects' positions in openrave thread.start_new_thread(self.poseUpdaterThread, (None,1)) if simulation_only: # create the robot interface for simulation velma = VelmaSim(self.openrave, velma_ikr) else: # create the robot interface for real hardware velma = Velma() self.openrave.addRobotInterface(velma) velma.updateTransformations() camera_fov_x = 50.0/180.0*math.pi camera_fov_y = 40.0/180.0*math.pi # self.openrave.setCamera(velma.T_B_C) # self.openrave.addCamera("head_camera",camera_fov_x, camera_fov_y, 0.2) # self.openrave.updatePose("head_camera", velma.T_B_C) k_pregrasp = Wrench(Vector3(1000.0, 1000.0, 1000.0), Vector3(300.0, 300.0, 300.0)) k_grasp = Wrench(Vector3(1000.0, 1000.0, 400.0), Vector3(300.0, 300.0, 300.0)) if True: # reset the gripper velma.resetFingers() velma.calibrateTactileSensors() velma.setMedianFilter(8) raw_input("Press Enter to enable cartesian impedance...") if velma.checkStopCondition(): exit(0) velma.switchToCart() # start with very low stiffness print "setting stiffness to very low value" velma.moveImpedance(velma.k_error, 0.5) if velma.checkStopCondition(0.5): exit(0) raw_input("Press Enter to continue...") if velma.checkStopCondition(): exit(0) velma.updateTransformations() velma.updateAndMoveTool( velma.T_W_E, 1.0 ) if velma.checkStopCondition(1.0): exit(0) raw_input("Press Enter to continue...") print "setting stiffness to bigger value" velma.moveImpedance(k_pregrasp, 3.0) if velma.checkStopCondition(3.0): exit(0) velma.updateTransformations() velma_init_T_B_W = copy.deepcopy(velma.T_B_W) if simulation_only: # velma.qar[1] += 10.0/180.0*math.pi velma.qar[6] = 90.0/180.0*math.pi rospy.sleep(1.0) # /ar_pose_marker /joint_states /right_arm/impedance /right_arm/torques /right_arm/trajectory /right_arm/wrench /right_hand/BHCmdx /right_hand/BHPressureState /right_hand/BHTemp /spline_trajectory_action_joint/cancel /spline_trajectory_action_joint/feedback /spline_trajectory_action_joint/goal /spline_trajectory_action_joint/result /spline_trajectory_action_joint/status /tf /tf_static ################ # the main loop ################ base_qar = copy.deepcopy(velma.qar) T_B_O_prev = None checked_grasps_idx = [] grips_db = [] if True: self.allowUpdateObjects() rospy.sleep(1.0) T_B_M = self.getMarkerPose(25) T_B_Ed = T_B_M * PyKDL.Frame(PyKDL.Rotation.RotX(180.0/180.0*math.pi)) * PyKDL.Frame(PyKDL.Vector(0,0,-0.2)) joint_traj_enable = False if joint_traj_enable: traj = self.openrave.planMoveForRightArm(T_B_Ed, None) if traj == None: print "FATAL ERROR: colud not plan trajectory" exit(0) duration = math.fsum(traj[3]) raw_input("Press Enter to visualize the trajectory...") if velma.checkStopCondition(): exit(0) self.openrave.showTrajectory(duration * time_mult * 0.1, qar_list=traj[4]) self.switchToJoint(velma) print "trajectory len: %s"%(len(traj[0])) raw_input("Press Enter to execute the trajectory on real robot in " + str(duration * time_mult) + "s ...") if 
velma.checkStopCondition(): exit(0) velma.moveWristTrajJoint(traj, time_mult, Wrench(Vector3(20,20,20), Vector3(4,4,4))) if velma.checkStopCondition(duration * time_mult + 1.0): exit(0) self.switchToCartesian(velma) # move to the desired position velma.updateTransformations() T_B_Wd = T_B_Ed * velma.T_E_W duration = velma.getMovementTime(T_B_Wd, max_v_l=0.1, max_v_r=0.2) raw_input("Press Enter to move the robot in " + str(duration) + " s...") if velma.checkStopCondition(): exit(0) velma.moveWrist(T_B_Wd, duration, Wrench(Vector3(20,20,20), Vector3(4,4,4)), abort_on_q5_singularity=False, abort_on_q5_q6_self_collision=True) if velma.checkStopCondition(duration): return if False: raw_input("Press Enter to set lower stiffness in 3s...") velma.moveImpedance(k_grasp, 3.0) if velma.checkStopCondition(3.0): exit(0) # move down velma.updateTransformations() T_B_Ed2 = T_B_Ed * PyKDL.Frame(PyKDL.Vector(0,0,0.28)) T_B_Wd = T_B_Ed2 * velma.T_E_W duration = velma.getMovementTime(T_B_Wd, max_v_l=0.05, max_v_r=0.1) raw_input("Press Enter to move the robot in " + str(duration) + " s...") if velma.checkStopCondition(): exit(0) velma.moveWrist(T_B_Wd, duration, Wrench(Vector3(30,30,30), Vector3(4,4,4)), abort_on_q5_singularity=False, abort_on_q5_q6_self_collision=True) if velma.checkStopCondition(duration): return # move up velma.updateTransformations() T_B_Wd = T_B_Ed * velma.T_E_W duration = velma.getMovementTime(T_B_Wd, max_v_l=0.1, max_v_r=0.2) raw_input("Press Enter to move the robot in " + str(duration) + " s...") if velma.checkStopCondition(): exit(0) velma.moveWrist(T_B_Wd, duration, Wrench(Vector3(30,30,30), Vector3(4,4,4)), abort_on_q5_singularity=False, abort_on_q5_q6_self_collision=True) if velma.checkStopCondition(duration): return velma.updateTransformations() T_B_Ed2 = T_B_Ed * PyKDL.Frame(PyKDL.Rotation.RotZ(-20.0/180.0*math.pi)) T_B_Wd = T_B_Ed2 * velma.T_E_W duration = velma.getMovementTime(T_B_Wd, max_v_l=0.05, max_v_r=0.1) raw_input("Press Enter to move the robot in " + str(duration) + " s...") if velma.checkStopCondition(): exit(0) velma.moveWrist(T_B_Wd, duration, Wrench(Vector3(40,40,40), Vector3(7,7,7)), abort_on_q5_singularity=False, abort_on_q5_q6_self_collision=True) if velma.checkStopCondition(duration): return velma.updateTransformations() T_B_Ed2 = T_B_Ed * PyKDL.Frame(PyKDL.Rotation.RotZ(40.0/180.0*math.pi)) T_B_Wd = T_B_Ed2 * velma.T_E_W duration = velma.getMovementTime(T_B_Wd, max_v_l=0.05, max_v_r=0.1) raw_input("Press Enter to move the robot in " + str(duration) + " s...") if velma.checkStopCondition(): exit(0) velma.moveWrist(T_B_Wd, duration, Wrench(Vector3(40,40,40), Vector3(7,7,7)), abort_on_q5_singularity=False, abort_on_q5_q6_self_collision=True) if velma.checkStopCondition(duration): return raw_input("Press Enter to set bigger stiffness in 3s...") velma.moveImpedance(k_pregrasp, 3.0) if velma.checkStopCondition(3.0): exit(0) # move to base pose print "moving to base pose..." 
traj = self.openrave.planMoveForRightArm(None, base_qar)
if traj == None:
    print "FATAL ERROR: could not plan trajectory to base pose"
    return
duration = math.fsum(traj[3])
raw_input("Press Enter to visualize the trajectory...")
if velma.checkStopCondition():
    exit(0)
self.openrave.showTrajectory(duration * time_mult * 0.1, qar_list=traj[4])
self.switchToJoint(velma)
print "trajectory len: %s"%(len(traj[0]))
raw_input("Press Enter to execute the trajectory on real robot in " + str(duration * time_mult) + "s ...")
if velma.checkStopCondition():
    exit(0)
velma.moveWristTrajJoint(traj, time_mult, Wrench(Vector3(20,20,20), Vector3(4,4,4)))
if velma.checkStopCondition(duration * time_mult + 1.0):
    exit(0)
raw_input("Press Enter to exit...")
exit(0)
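# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): the grasp approach above
# builds its end-effector goal as T_B_Ed = T_B_M * RotX(180 deg) * (0, 0, -0.2),
# i.e. flip the marker frame about its X axis and back off along the flipped Z.
# The helper below repeats that frame algebra in isolation; the function name
# and the standoff default are illustrative only.
# ---------------------------------------------------------------------------
import math
import PyKDL

def approach_frame_from_marker(T_B_M, standoff=0.2):
    """Goal frame facing the marker, standing off 'standoff' metres along Z."""
    flip_x = PyKDL.Frame(PyKDL.Rotation.RotX(math.pi))         # face the marker
    back_off = PyKDL.Frame(PyKDL.Vector(0.0, 0.0, -standoff))  # retreat along Z
    return T_B_M * flip_x * back_off

# usage, equivalent to the inline expression above:
#   T_B_Ed = approach_frame_from_marker(self.getMarkerPose(25))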
def spin(self): self.pub_marker = velmautils.MarkerPublisher() rospack = rospkg.RosPack() srdf_path=rospack.get_path('velma_description') + '/robots/' obj_filenames = [ rospack.get_path('velma_scripts') + '/data/jar/jar.kinbody.xml', # rospack.get_path('velma_scripts') + '/data/jar/jar_collision.kinbody.xml', ] print "creating interface for Velma..." # create the interface for Velma robot velma = Velma() print "done." self.listener = tf.TransformListener() rospy.sleep(0.5) # self.br = tf.TransformBroadcaster() # # Initialise Openrave # openrave = openraveinstance.OpenraveInstance() openrave.startOpenrave(viewer=True) self.openrave = openrave openrave.runOctomapServer() collision_models_urdf = { "velmasimplified0" : ("velma_simplified.srdf", False, True, 0.0, False), } openrave.readRobot(srdf_path=srdf_path, collision_models_urdf=collision_models_urdf) mo_state = objectstate.MovableObjectsState(openrave.env, obj_filenames) for filename in obj_filenames: body = openrave.env.ReadKinBodyXMLFile(filename) body.Enable(True) body.SetVisible(True) openrave.env.Add(body) openrave.addMaskedObjectToOctomap("velmasimplified0"); self.grasped = {} s = rospy.Service('change_state', state_server_msgs.srv.ChangeState, self.handle_change_state) counter = 0 while not rospy.is_shutdown(): time_now, js = velma.getLastJointState() openrave.updateRobotConfigurationRos(js) time_tf = rospy.Time.now()-rospy.Duration(0.5) added, removed = mo_state.update(self.listener, time_tf) for link_name in self.grasped: object_name, T_L_O = self.grasped[link_name] if object_name in mo_state.obj_map: T_W_L = conv.OpenraveToKDL(openrave.robot_rave.GetLink(link_name).GetTransform()) mo_state.obj_map[object_name].T_W_O = T_W_L * T_L_O mo_state.updateOpenrave(openrave.env) for obj_name in added: openrave.addMaskedObjectToOctomap(obj_name) for obj_name in removed: openrave.removeMaskedObjectFromOctomap(obj_name) # mo_state.updateOpenrave(openrave.env) if counter % 10 ==0: m_id = 0 # publish markers for obj_name in mo_state.obj_map: obj = mo_state.obj_map[obj_name] body = openrave.env.GetKinBody(obj_name) for link in body.GetLinks(): T_W_L = conv.OpenraveToKDL(link.GetTransform()) for geom in link.GetGeometries(): T_L_G = conv.OpenraveToKDL(geom.GetTransform()) g_type = geom.GetType() if g_type == openravepy_int.GeometryType.Cylinder: radius = geom.GetCylinderRadius() height = geom.GetCylinderHeight() m_id = self.pub_marker.publishSinglePointMarker(PyKDL.Vector(), m_id, r=1, g=0, b=0, a=1, namespace='state_server_objects', frame_id='world', m_type=Marker.CYLINDER, scale=Vector3(radius*2, radius*2, height), T=T_W_L*T_L_G) self.pub_marker.eraseMarkers(m_id, 1000, frame_id='world', namespace='state_server_objects') for obj_name in mo_state.obj_map: openrave.UpdateMaskedObjectInOctomap(obj_name) rospy.sleep(0.01) counter += 1 if counter >= 100: counter = 0
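# ---------------------------------------------------------------------------
# Hedged sketch (not from the original source): the loop above keeps a grasped
# object attached by recomputing T_W_O = T_W_L * T_L_O every cycle, so the
# 'change_state' handler only has to record the constant link->object offset
# T_L_O once, at grasp time.  The request field name (req.command), the
# ...Response constructor and keeping mo_state on self are assumptions here;
# the real state_server_msgs/ChangeState definition is not shown in this file.
# ---------------------------------------------------------------------------
def handle_change_state_sketch(self, req):
    # commands as sent by the client script later in this file:
    #   "grasp <link_name> <object_name>"   or   "drop <link_name>"
    args = req.command.split()
    if args[0] == 'grasp':
        link_name, object_name = args[1], args[2]
        T_W_L = conv.OpenraveToKDL(
            self.openrave.robot_rave.GetLink(link_name).GetTransform())
        T_W_O = self.mo_state.obj_map[object_name].T_W_O
        # store the grasp as a constant link->object transform
        self.grasped[link_name] = (object_name, T_W_L.Inverse() * T_W_O)
    elif args[0] == 'drop':
        self.grasped.pop(args[1], None)
    return state_server_msgs.srv.ChangeStateResponse()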
def spin(self): sys.setrecursionlimit(200) # generate the set of grasps for a jar grasps_T_J_E = [] for angle_jar_axis in np.arange(0.0, math.pi*2.0, 10.0/180.0*math.pi): for translation_jar_axis in np.linspace(-0.03, 0.03, 7): T_J_E = PyKDL.Frame(PyKDL.Rotation.RotZ(90.0/180.0*math.pi)) * PyKDL.Frame(PyKDL.Rotation.RotX(angle_jar_axis)) * PyKDL.Frame(PyKDL.Vector(translation_jar_axis, 0, -0.17)) grasps_T_J_E.append( (T_J_E, T_J_E.Inverse()) ) T_J_E = PyKDL.Frame(PyKDL.Rotation.RotZ(-90.0/180.0*math.pi)) * PyKDL.Frame(PyKDL.Rotation.RotX(angle_jar_axis)) * PyKDL.Frame(PyKDL.Vector(translation_jar_axis, 0, -0.17)) grasps_T_J_E.append( (T_J_E, T_J_E.Inverse()) ) # TEST: OpenJarTaskRRT if False: task = tasks.OpenJarTaskRRT(None, ("right", grasps_T_J_E, )) m_id = 0 for coord in task.valid: xi,yi,zi = coord x = plutl.x_set[xi] y = plutl.y_set[yi] z = plutl.z_set[zi] rot_set = task.valid[coord] size = float(len(rot_set))/40.0 m_id = self.pub_marker.publishSinglePointMarker(PyKDL.Vector(x,y,z), m_id, r=1.0, g=1*size, b=1*size, a=1, namespace='default', frame_id="torso_link2", m_type=Marker.SPHERE, scale=Vector3(0.05*size, 0.05*size, 0.05*size)) rospy.sleep(0.01) exit(0) rospack = rospkg.RosPack() env_file=rospack.get_path('velma_scripts') + '/data/common/velma_room.env.xml' # env_file=rospack.get_path('velma_scripts') + '/data/common/velma_room_obstacles.env.xml' srdf_path=rospack.get_path('velma_description') + '/robots/' obj_filenames = [ rospack.get_path('velma_scripts') + '/data/jar/jar.kinbody.xml' ] rrt = rrt_star_connect_planner.PlannerRRT(3, env_file, obj_filenames, srdf_path) rospy.wait_for_service('/change_state') changeState = rospy.ServiceProxy('/change_state', state_server_msgs.srv.ChangeState) self.listener = tf.TransformListener() print "creating interface for Velma..." # create the interface for Velma robot velma = Velma() self.pub_head_look_at = rospy.Publisher("/head_lookat_pose", geometry_msgs.msg.Pose) print "done." rospy.sleep(0.5) velma.updateTransformations() # switch to cartesian impedance mode... 
# velma.switchToCart() # exit(0) velma.switchToJoint() # TEST: moving in joint impedance mode if False: raw_input("Press ENTER to move the robot...") velma.switchToJoint() joint_names = [ "right_arm_4_joint" ] q_dest = [ velma.js_pos["right_arm_4_joint"] - 10.0/180.0*math.pi ] velma.moveJoint(q_dest, joint_names, 10.0, start_time=0.5) traj = [[q_dest], None, None, [10.0]] velma.moveJointTraj(traj, joint_names, start_time=0.5) result = velma.waitForJoint() print "moveJointTraj result", result.error_code exit(0) hv = [1.2, 1.2, 1.2, 1.2] ht = [3000, 3000, 3000, 3000] target_gripper = "right" if target_gripper == "left": velma.moveHandLeft([40.0/180.0*math.pi, 40.0/180.0*math.pi, 40.0/180.0*math.pi, 0], hv, ht, 5000, True) velma.moveHandRight([120.0/180.0*math.pi, 120.0/180.0*math.pi, 120.0/180.0*math.pi, 0], hv, ht, 5000, True) else: velma.moveHandLeft([120.0/180.0*math.pi, 120.0/180.0*math.pi, 120.0/180.0*math.pi, 0], hv, ht, 5000, True) velma.moveHandRight([40.0/180.0*math.pi, 40.0/180.0*math.pi, 40.0/180.0*math.pi, 0], hv, ht, 5000, True) result = velma.waitForHandLeft() print "waitForHandLeft result", result.error_code result = velma.waitForHandRight() print "waitForHandRight result", result.error_code # # Initialise Openrave # openrave = openraveinstance.OpenraveInstance() openrave.startOpenrave(collision='fcl') openrave.loadEnv(env_file) openrave.runOctomapClient(ros=True) openrave.readRobot(srdf_path=srdf_path) for filename in obj_filenames: body = openrave.env.ReadKinBodyXMLFile(filename) body.Enable(False) body.SetVisible(False) openrave.env.Add(body) mo_state = objectstate.MovableObjectsState(openrave.env, obj_filenames) openrave.setCamera(PyKDL.Vector(2.0, 0.0, 2.0), PyKDL.Vector(0.60, 0.0, 1.10)) openrave.updateRobotConfigurationRos(velma.js_pos) while True: if self.listener.canTransform('torso_base', 'jar', rospy.Time(0)): pose = self.listener.lookupTransform('torso_base', 'jar', rospy.Time(0)) T_B_J = pm.fromTf(pose) break print "waiting for planner..." rrt.waitForInit() print "planner initialized" T_B_E_list = [] for T_J_E, T_E_J in grasps_T_J_E: T_B_Ed = T_B_J * T_J_E T_B_E_list.append(T_B_Ed) print "grasps for jar:", len(T_B_E_list) if True: # look around self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(1,0,1.2)))) rospy.sleep(3) # self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(1,-1,1.2)))) # rospy.sleep(2) # self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(1,1,1.2)))) # rospy.sleep(2) # self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(1,0,1.2)))) # rospy.sleep(2) raw_input("Press ENTER to start planning...") openrave.updateOctomap() tree_serialized = openrave.or_octomap_client.SendCommand("GetOcTree") for i in range(10): time_tf = rospy.Time.now()-rospy.Duration(0.5) mo_state.update(self.listener, time_tf) mo_state.updateOpenrave(openrave.env) rospy.sleep(0.1) print "octomap updated" print "Planning trajectory to grasp the jar..." 
env_state = (openrave.robot_rave.GetDOFValues(), mo_state.obj_map, tree_serialized, []) path, dof_names = rrt.RRTstar(env_state, tasks.GraspTaskRRT, (target_gripper, T_B_E_list), 240.0) traj = [] for i in range(len(path)-1): q1 = path[i] q2 = path[i+1] for f in np.linspace(0.0, 1.0, 40): traj.append( q1 * (1.0 - f) + q2 * f ) while True: if raw_input("Type e to execute") == 'e': break openrave.showTrajectory(dof_names, 10.0, traj) traj = velma.prepareTrajectory(path, velma.getJointStatesByNames(dof_names), dof_names, speed_mult=1.0) velma.switchToJoint() velma.moveJointTraj(traj, dof_names, start_time=0.5) result = velma.waitForJoint() print "moveJointTraj result", result.error_code if result.error_code != 0: exit(0) raw_input("Press ENTER to continue...") openrave.updateRobotConfigurationRos(velma.js_pos) print "before grasp" report = CollisionReport() if openrave.env.CheckCollision(openrave.robot_rave, report): print "collision" else: print "no collision" report = CollisionReport() if openrave.robot_rave.CheckSelfCollision(report): print "self-collision" else: print "no self-collision" raw_input("Press ENTER to close the gripper...") if target_gripper == "left": velma.moveHandLeft([90.0/180.0*math.pi, 90.0/180.0*math.pi, 90.0/180.0*math.pi, 0], hv, ht, 5000, True) result = velma.waitForHandLeft() print "moveHandLeft result", result.error_code else: velma.moveHandRight([90.0/180.0*math.pi, 90.0/180.0*math.pi, 90.0/180.0*math.pi, 0], hv, ht, 5000, True) result = velma.waitForHandRight() print "waitForHandRight result", result.error_code openrave.updateRobotConfigurationRos(velma.js_pos) print "after grasp" report = CollisionReport() if openrave.env.CheckCollision(openrave.robot_rave, report): print "collision" else: print "no collision" report = CollisionReport() if openrave.robot_rave.CheckSelfCollision(report): print "self-collision" else: print "no self-collision" openrave.robot_rave.Grab(openrave.env.GetKinBody("jar"), openrave.robot_rave.GetLink(target_gripper+"_HandPalmLink")) changeState("grasp "+target_gripper+"_HandPalmLink "+ "jar") print "after grab" report = CollisionReport() if openrave.env.CheckCollision(openrave.robot_rave, report): print "collision" else: print "no collision" report = CollisionReport() if openrave.robot_rave.CheckSelfCollision(report): print "self-collision" else: print "no self-collision" print "Planning trajectory to pull the jar from the cabinet..." 
for i in range(10): time_tf = rospy.Time.now()-rospy.Duration(0.5) mo_state.update(self.listener, time_tf) mo_state.updateOpenrave(openrave.env) rospy.sleep(0.1) env_state = (openrave.robot_rave.GetDOFValues(), mo_state.obj_map, tree_serialized, [["jar", target_gripper+"_HandPalmLink"]]) # path, dof_names = rrt.RRTstar(env_state, tasks.GraspTaskRRT, (target_gripper, T_B_E_list), 60.0) path, dof_names = rrt.RRTstar(env_state, tasks.MoveArmsCloseTaskRRT, (None,), 120.0) # path.reverse() traj = [] for i in range(len(path)-1): q1 = path[i] q2 = path[i+1] for f in np.linspace(0.0, 1.0, 40): traj.append( q1 * (1.0 - f) + q2 * f ) while True: if raw_input("Type e to execute") == 'e': break openrave.showTrajectory(dof_names, 10.0, traj) traj = velma.prepareTrajectory(path, velma.getJointStatesByNames(dof_names), dof_names, speed_mult=1.0) result = velma.moveJointTraj(traj, dof_names, start_time=0.5) result = velma.waitForJoint() print "moveJointTraj result", result.error_code if result.error_code != 0: exit(0) raw_input("Press ENTER to continue...") openrave.updateRobotConfigurationRos(velma.js_pos) changeState("drop "+target_gripper+"_HandPalmLink") raw_input("Press ENTER to exit...") rrt.cleanup()
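# ---------------------------------------------------------------------------
# Hedged sketch (refactoring idea, not part of the original script): the path
# returned by rrt.RRTstar is densified twice above with the same inline loop.
# A small helper keeps that interpolation in one place; only the default of
# 40 samples per segment is taken from the inline code.
# ---------------------------------------------------------------------------
import numpy as np

def densify_path(path, samples_per_segment=40):
    """Linearly interpolate between consecutive joint-space waypoints."""
    traj = []
    for q1, q2 in zip(path[:-1], path[1:]):
        for f in np.linspace(0.0, 1.0, samples_per_segment):
            traj.append(q1 * (1.0 - f) + q2 * f)
    return traj

# usage, equivalent to the inline loops above:
#   traj = densify_path(path)
#   openrave.showTrajectory(dof_names, 10.0, traj)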
def spin(self): simulation = True rospack = rospkg.RosPack() env_file=rospack.get_path('velma_scripts') + '/data/key/vis_test.env.xml' xacro_uri=rospack.get_path('velma_description') + '/robots/velma.urdf.xacro' srdf_path=rospack.get_path('velma_description') + '/robots/' rrt = rrt_star_connect_planner.PlannerRRT(3, env_file, xacro_uri, srdf_path) print "creating interface for Velma..." # create the interface for Velma robot self.velma = Velma() print "done." rospy.sleep(0.5) self.velma.updateTransformations() if simulation: hv = [3.2, 3.2, 3.2, 3.2] ht = [3000, 3000, 3000, 3000] self.velma.moveHandLeft([120.0/180.0*math.pi, 120.0/180.0*math.pi, 120.0/180.0*math.pi, 0], hv, ht, 5000, True) self.velma.moveHandRight([120.0/180.0*math.pi, 120.0/180.0*math.pi, 120.0/180.0*math.pi, 0], hv, ht, 5000, True) rospy.sleep(1.0) # # Initialise Openrave # openrave = openraveinstance.OpenraveInstance() openrave.startOpenraveURDF(env_file=env_file) openrave.readRobot(xacro_uri=xacro_uri, srdf_path=srdf_path) rrt.waitForInit() openrave.setCamera(PyKDL.Vector(2.0, 0.0, 2.0), PyKDL.Vector(0.60, 0.0, 1.10)) openrave.updateRobotConfigurationRos(self.velma.js_pos) # TEST: key hole goal if False: task = KeyRotTaskRRT(openrave) task.SampleGoal(None, None) exit(0) # TEST: head IK if False: v_rot = 0.800 v_lean = 0.375 v_head = 0.392 h_cam = 0.0 v_cam = 0.225 head_kin = headkinematics.HeadKinematics(v_rot, v_lean, v_head, h_cam, v_cam) openrave.addSphere("target_sphere", 0.05) while not rospy.is_shutdown(): head_kin.UpdateTorsoPose(openrave.robot_rave.GetJoint("torso_0_joint").GetValue(0), openrave.robot_rave.GetJoint("torso_1_joint").GetValue(0)) target_pos = PyKDL.Vector(1, random.uniform(-1.5, 1.5), random.uniform(0,2)) head_kin.UpdateTargetPosition(target_pos.x(), target_pos.y(), target_pos.z()) openrave.updatePose("target_sphere", PyKDL.Frame(target_pos)) head_kin.TransformTargetToHeadFrame() joint_pan, joint_tilt = head_kin.CalculateHeadPose() if joint_pan == None: continue openrave.robot_rave.SetDOFValues([joint_pan, joint_tilt], [openrave.robot_rave.GetJoint("head_pan_joint").GetDOFIndex(), openrave.robot_rave.GetJoint("head_tilt_joint").GetDOFIndex()]) raw_input("Press ENTER to continue...") exit(0) raw_input("Press ENTER to continue...") # T_B_E_list = [PyKDL.Frame(PyKDL.Rotation.RotY(90.0/180.0*math.pi), PyKDL.Vector(0.6, 0, 1.6))] # path, dof_names = rrt.RRTstar(openrave.robot_rave.GetDOFValues(), tasks.GraspTaskRRT, ("right", T_B_E_list), 30.0) path, dof_names = rrt.RRTstar(openrave.robot_rave.GetDOFValues(), tasks.KeyRotTaskRRT, ("right",), 30.0) # path, dof_names = rrt.RRTstar(openrave.robot_rave.GetDOFValues(), tasks.LooAtTaskRRT, ("right",), 60.0) # path, dof_names = rrt.RRTstar(openrave.robot_rave.GetDOFValues(), tasks.MoveArmsCloseTaskRRT, ("right",), 30.0) traj = [] for i in range(len(path)-1): q1 = path[i] q2 = path[i+1] for f in np.linspace(0.0, 1.0, 40): traj.append( q1 * (1.0 - f) + q2 * f ) while True: if raw_input("Type e to exit") == 'e': break openrave.showTrajectory(dof_names, 10.0, traj) rrt.cleanup() # raw_input("Press ENTER to exit...") exit(0) rospy.sleep(1) openrave.runOctomap() sphere = RaveCreateKinBody(openrave.env,'') sphere.SetName("sphere") sphere.InitFromSpheres(numpy.array([[0,0,0,0.05]]),True) openrave.env.Add(sphere,True) # test the collision checker for octomap if True: raw_input("Press ENTER to continue...") ob = openrave.env.GetKinBody("_OCTOMAP_MAP_") cc = openrave.env.GetCollisionChecker() m_id = 0 for x in np.linspace(0,1.5,30): for y in np.linspace(-1,1,40): for z 
in np.linspace(1,2,20): # print x,y,z tr = conv.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(x,y,z))) sphere.SetTransform(tr) openrave.env.UpdatePublishedBodies() report = CollisionReport() ret = cc.CheckCollision(sphere, report) # ret = openrave.env.CheckCollision(ob, report) # print ret if ret: m_id = self.pub_marker.publishSinglePointMarker(PyKDL.Vector(x,y,z), m_id, r=1, g=0, b=0, a=1, namespace='default', frame_id='world', m_type=Marker.SPHERE, scale=Vector3(0.1, 0.1, 0.1), T=None) continue if report.plink1 == None: print None else: print report.plink1.GetParent().GetName(), report.plink2.GetName() # print " ", report.vLinkColliding for link1, link2 in report.vLinkColliding: print " ", link1.GetParent().GetName(), link2.GetName() # print report.plink1.GetParent().GetName(), report.plink2.GetParent().GetName() exit(0) self.pub_head_look_at = rospy.Publisher("/head_lookat_pose", geometry_msgs.msg.Pose) raw_input("Press ENTER to look around...") # self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,-0.5,1)))) # raw_input("Press ENTER to exit...") # exit(0) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(1,0,2)))) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,1,2)))) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,1,1.2)))) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(1,0,1.2)))) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,-1,1.2)))) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,-1,2)))) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(1,0,2)))) raw_input(".") exit(0)
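# ---------------------------------------------------------------------------
# Hedged sketch (not from the original source): the octomap test above sweeps
# a probe sphere over a regular grid and marks the cells that collide.  The
# helper below isolates that sweep; the function name and generator interface
# are illustrative, and it relies on the same conv.KDLToOpenrave helper used
# by the original code.
# ---------------------------------------------------------------------------
import numpy as np
import PyKDL
from openravepy import CollisionReport

def probe_occupied_cells(env, probe_body, xs, ys, zs):
    """Yield PyKDL.Vector centres of grid cells where probe_body collides."""
    checker = env.GetCollisionChecker()
    for x in xs:
        for y in ys:
            for z in zs:
                probe_body.SetTransform(
                    conv.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(x, y, z))))
                if checker.CheckCollision(probe_body, CollisionReport()):
                    yield PyKDL.Vector(x, y, z)

# usage, mirroring the grid used above:
#   for pt in probe_occupied_cells(openrave.env, sphere,
#                                  np.linspace(0, 1.5, 30),
#                                  np.linspace(-1, 1, 40),
#                                  np.linspace(1, 2, 20)):
#       ...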
class GraspingTask: """ Class for grasp learning. """ def __init__(self, pub_marker=None): self.pub_marker = pub_marker self.listener = tf.TransformListener(); def posesCollectorThread(self, poses_list): while not rospy.is_shutdown() and self.collect_poses: self.velma.updateTransformations() poses_list.append( self.velma.T_B_W * self.velma.T_W_T ) rospy.sleep(0.1) def wristFollowerThread(self, poses_list): while not rospy.is_shutdown() and self.follow_wrist_pose: self.velma.updateTransformations() T_B_Wd = self.velma.T_B_W time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.02, max_v_r = 0.04) if time < 1.0: time = 1.0 self.velma.moveWrist(T_B_Wd, time, Wrench(Vector3(40,40,40), Vector3(14,14,14))) rospy.sleep(time+0.1) def estCommonPoint(self, T_B_T_list): def f_2(tool_end_estimate): tool_end_T = PyKDL.Vector(tool_end_estimate[0], tool_end_estimate[1], tool_end_estimate[2]) tool_end_B_list = [] mean_pt_B = PyKDL.Vector() for T_B_T in T_B_T_list: tool_end_B = T_B_T * tool_end_T tool_end_B_list.append( tool_end_B ) mean_pt_B += tool_end_B mean_pt_B = mean_pt_B/len(T_B_T_list) dist_list = [] for tool_end_B in tool_end_B_list: dist_list.append( (tool_end_B - mean_pt_B).Norm() ) return dist_list tool_end_estimate = 0.0, 0.0, 0.0 tool_end_estimate_ret, ier = optimize.leastsq(f_2, tool_end_estimate, maxfev = 1000) return PyKDL.Vector(tool_end_estimate_ret[0], tool_end_estimate_ret[1], tool_end_estimate_ret[2]), sum( f_2(tool_end_estimate_ret) ) def estCommonPointTest(self): key_endpoint_T = PyKDL.Vector(0,0,0.2) central_point_B = PyKDL.Vector(1,0,1) T_B_T_1 = PyKDL.Frame(PyKDL.Rotation.RotX(30.0/180.0*math.pi), central_point_B) * PyKDL.Frame(-key_endpoint_T) T_B_T_2 = PyKDL.Frame(PyKDL.Rotation.RotY(30.0/180.0*math.pi), central_point_B) * PyKDL.Frame(-key_endpoint_T) T_B_T_3 = PyKDL.Frame(PyKDL.Rotation.RotX(0.0/180.0*math.pi), central_point_B) * PyKDL.Frame(-key_endpoint_T) print self.estCommonPoint([T_B_T_1, T_B_T_2, T_B_T_3]) def generateSpiral(self, max_radius, min_dist): points = [(0,0)] rounds = max_radius / min_dist for angle in np.arange(0.0, rounds*math.pi*2.0, 1.0/180.0*math.pi): rx = math.cos(angle) ry = math.sin(angle) r = min_dist * angle/math.pi/2.0 nx = r * rx ny = r * ry dist = math.sqrt( (points[-1][0]-nx)*(points[-1][0]-nx) + (points[-1][1]-ny)*(points[-1][1]-ny) ) if dist > min_dist: points.append( (nx, ny) ) return points def spin(self): simulation_only = False key_endpoint_T = PyKDL.Vector(0,0,0.2) T_B_P = PyKDL.Frame(PyKDL.Rotation(), PyKDL.Vector(1,0,1)) m_id = 0 self.pub_marker.eraseMarkers(0,3000, frame_id='world') print "creating interface for Velma..." # create the interface for Velma robot self.velma = Velma() print "done." m_id = 0 self.pub_marker.eraseMarkers(0,3000, frame_id='world') # key and grasp parameters self.T_E_O = PyKDL.Frame() self.T_E_Orot = PyKDL.Frame(PyKDL.Vector(0, 0, 0.07)) self.key_axis_O = PyKDL.Vector(0,0,1) self.key_up_O = PyKDL.Vector(1,0,0) self.key_side_O = self.key_axis_O * self.key_up_O # change the tool - the safe way print "switching to joint impedance..." if not self.velma.switchToJoint(): print "ERROR: switchToJoint" exit(0) rospy.sleep(0.5) print "updating tool..." self.velma.updateTransformations() self.velma.updateAndMoveToolOnly(PyKDL.Frame(self.velma.T_W_E.p+PyKDL.Vector(0.1,0,0)), 0.1) rospy.sleep(0.5) print "done." print "switching to cartesian impedance..." 
if not self.velma.switchToCart(): print "ERROR: switchToCart" exit(0) rospy.sleep(0.5) raw_input("Press ENTER to continue...") q_grasp = [1.4141768883517725, 1.4179811607057289, 1.4377081536379384, 0] q_pre = 10.0/180.0*math.pi hv = [1.2, 1.2, 1.2, 1.2] ht = [3000, 3000, 3000, 3000] # set gripper configuration if True: self.velma.moveHand([0.0, 0.0, 0.0, 0.0/180.0*math.pi], hv, ht, 5000, True) rospy.sleep(3) self.velma.moveHand([q_grasp[0]-q_pre, q_grasp[1]-q_pre, q_grasp[2]-q_pre, q_grasp[3]], hv, ht, 5000, True) rospy.sleep(5) self.velma.moveHand([q_grasp[0]+q_pre, q_grasp[1]+q_pre, q_grasp[2]+q_pre, q_grasp[3]], hv, ht, 5000, True) rospy.sleep(3) if False: # start with very low stiffness print "setting stiffness to very low value" k_low = Wrench(Vector3(1.0, 1.0, 1.0), Vector3(0.5, 0.5, 0.5)) self.velma.moveImpedance(k_low, 0.5) if self.velma.checkStopCondition(0.5): exit(0) print "done." print "identifying the parameters of tool in..." wait_time = 20 for i in range(wait_time): print "%s s"%(wait_time-i) rospy.sleep(1.0) print "collecting points..." self.collect_poses = True T_B_T_list = [] thread.start_new_thread(self.posesCollectorThread, (T_B_T_list,)) collect_time = 10 for i in range(collect_time): print "%s s"%(collect_time-i) rospy.sleep(1.0) self.collect_poses = False rospy.sleep(0.5) key_endpoint_T, error = self.estCommonPoint(T_B_T_list) print key_endpoint_T, error self.key_endpoint_O = self.T_E_O.Inverse() * self.velma.T_W_E.Inverse() * self.velma.T_W_T * key_endpoint_T print "self.key_endpoint_O = PyKDL.Vector(%s, %s, %s)"%(self.key_endpoint_O[0], self.key_endpoint_O[1], self.key_endpoint_O[2]) print "done." else: # self.key_endpoint_O = PyKDL.Vector(-0.00248664992634, -6.7348683886e-05, 0.232163117525) self.key_endpoint_O = PyKDL.Vector(0.000256401261281, -0.000625166847342, 0.232297442735) k_high = Wrench(Vector3(1000.0, 1000.0, 1000.0), Vector3(150, 150, 150)) max_force = 12.0 max_torque = 12.0 path_tol = (PyKDL.Vector(max_force/k_high.force.x, max_force/k_high.force.y, max_force/k_high.force.z), PyKDL.Vector(max_torque/k_high.torque.x, max_torque/k_high.torque.y, max_torque/k_high.torque.z)) max_force2 = 20.0 max_torque2 = 20.0 path_tol2 = (PyKDL.Vector(max_force2/k_high.force.x, max_force2/k_high.force.y, max_force2/k_high.force.z), PyKDL.Vector(max_torque2/k_high.torque.x, max_torque2/k_high.torque.y, max_torque2/k_high.torque.z)) path_tol3 = (PyKDL.Vector(max_force/k_high.force.x, max_force/k_high.force.y, max_force2/k_high.force.z), PyKDL.Vector(max_torque/k_high.torque.x, max_torque/k_high.torque.y, max_torque/k_high.torque.z)) print "path tolerance:", path_tol # k_hole_in = Wrench(Vector3(1000.0, 500.0, 500.0), Vector3(200, 200, 200)) k_hole_in = Wrench(Vector3(100.0, 1000.0, 1000.0), Vector3(50, 5, 5)) path_tol_in = (PyKDL.Vector(max_force2/k_hole_in.force.x, max_force/k_hole_in.force.y, max_force/k_hole_in.force.z), PyKDL.Vector(max_torque2/k_hole_in.torque.x, max_torque2/k_hole_in.torque.y, max_torque2/k_hole_in.torque.z)) path_tol_in2 = (PyKDL.Vector(max_force2/k_hole_in.force.x, max_force2/k_hole_in.force.y, max_force2/k_hole_in.force.z), PyKDL.Vector(max_torque2/k_hole_in.torque.x, max_torque2/k_hole_in.torque.y, max_torque2/k_hole_in.torque.z)) if False: k_increase = [ Wrench(Vector3(10.0, 10.0, 10.0), Vector3(5, 5, 5)), Wrench(Vector3(50.0, 50.0, 50.0), Vector3(25, 25, 25)), Wrench(Vector3(250.0, 250.0, 250.0), Vector3(100, 100, 100)) ] for k in k_increase: raw_input("Press Enter to set bigger stiffness...") self.velma.updateTransformations() T_B_Wd = 
self.velma.T_B_W time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.02, max_v_r = 0.04) print "moving the wrist to the current pose in " + str(time) + " s..." self.velma.moveWrist(T_B_Wd, time, Wrench(Vector3(40,40,40), Vector3(14,14,14))) rospy.sleep(time) print "done." print "setting stiffness to bigger value" self.velma.moveImpedance(k, 1.0) if self.velma.checkStopCondition(1.1): exit(0) print "done." print "move the grasped key near the key hole..." self.follow_wrist_pose = True thread.start_new_thread(self.wristFollowerThread, (None,)) raw_input("Press ENTER to save the pose...") self.follow_wrist_pose = False rospy.sleep(0.5) self.velma.updateTransformations() self.T_B_O_nearHole = self.velma.T_B_W * self.velma.T_W_E * self.T_E_O p = self.T_B_O_nearHole.p q = self.T_B_O_nearHole.M.GetQuaternion() print "self.T_B_O_nearHole = PyKDL.Frame(PyKDL.Rotation.Quaternion(%s, %s, %s, %s), PyKDL.Vector(%s, %s, %s))"%(q[0], q[1], q[2], q[3], p[0], p[1], p[2]) T_B_Wd = self.velma.T_B_W time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.02, max_v_r = 0.04) print "moving the wrist to the current pose in " + str(time) + " s..." self.velma.moveWrist(T_B_Wd, time, Wrench(Vector3(40,40,40), Vector3(14,14,14))) status, feedback = self.velma.waitForCartEnd() print "setting stiffness to", k_high self.velma.moveImpedance(k_high, 1.0) if self.velma.checkStopCondition(1.0): exit(0) print "done." else: # self.T_B_O_nearHole = PyKDL.Frame(PyKDL.Rotation.Quaternion(0.697525674378, -0.00510212356955, 0.715527762697, 0.0381041038336), PyKDL.Vector(0.528142123375, 0.0071159410235, 1.31300679435)) # self.T_B_O_nearHole = PyKDL.Frame(PyKDL.Rotation.Quaternion(0.699716675653, -0.0454110538496, 0.709529999372, 0.0700113561626), PyKDL.Vector(0.546491646893, -0.101297899801, 1.30959887442)) self.T_B_O_nearHole = PyKDL.Frame(PyKDL.Rotation.Quaternion(0.71891504857, -0.0529880479354, 0.691118088949, 0.0520500417212), PyKDL.Vector(0.883081316461, -0.100813768303, 0.95381559114)) k_increase = [ Wrench(Vector3(10.0, 10.0, 10.0), Vector3(5, 5, 5)), Wrench(Vector3(50.0, 50.0, 50.0), Vector3(25, 25, 25)), Wrench(Vector3(250.0, 250.0, 250.0), Vector3(100, 100, 100)), k_high ] for k in k_increase: raw_input("Press Enter to set bigger stiffness...") self.velma.updateTransformations() T_B_Wd = self.velma.T_B_W time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.02, max_v_r = 0.04) print "moving the wrist to the current pose in " + str(time) + " s..." self.velma.moveWrist(T_B_Wd, time, Wrench(Vector3(40,40,40), Vector3(14,14,14))) rospy.sleep(time) print "done." print "setting stiffness to bigger value" self.velma.moveImpedance(k, 1.0) if self.velma.checkStopCondition(1.1): exit(0) print "done." if True: print "probing the door..." 
search_radius = 0.02 min_dist = 0.005 probed_points = [] contact_points_B = [] # move to the center point and push the lock x = 0 y = 0 T_O_Od = PyKDL.Frame(self.key_up_O*x + self.key_side_O*y) T_O_Od2 = PyKDL.Frame(self.key_up_O*x + self.key_side_O*y + self.key_axis_O*0.1) self.velma.updateTransformations() T_B_Wd = self.T_B_O_nearHole * T_O_Od * self.T_E_O.Inverse() * self.velma.T_W_E.Inverse() time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.02, max_v_r = 0.04) raw_input("Press ENTER to movie the wrist in " + str(time) + " s...") self.velma.moveWrist(T_B_Wd, time, Wrench(Vector3(40,40,40), Vector3(14,14,14)), path_tol=path_tol2) status, feedback = self.velma.waitForCartEnd() print "status", status if status.error_code != 0: print "unexpected contact", feedback exit(0) self.velma.updateTransformations() T_B_Wd = self.T_B_O_nearHole * T_O_Od2 * self.T_E_O.Inverse() * self.velma.T_W_E.Inverse() time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.02, max_v_r = 0.04) raw_input("Press ENTER to movie the wrist in " + str(time) + " s...") self.velma.moveWrist(T_B_Wd, time, Wrench(Vector3(40,40,40), Vector3(14,14,14)), path_tol=path_tol) status, feedback = self.velma.waitForCartEnd() print "status", status if status.error_code != 0: self.velma.updateTransformations() contact_B = self.velma.T_B_W * self.velma.T_W_E * self.T_E_O * self.key_endpoint_O contact_points_B.append(contact_B) m_id = self.pub_marker.publishSinglePointMarker(contact_B, m_id, r=1, g=0, b=0, a=1, namespace='default', frame_id='torso_base', m_type=Marker.SPHERE, scale=Vector3(0.003, 0.003, 0.003), T=None) print feedback else: print "no contact" exit(0) T_O_Od3 = PyKDL.Frame(self.key_axis_O * 5.0/k_high.force.z) self.velma.updateTransformations() T_B_Wref = self.velma.T_B_W T_B_Wd = T_B_Wref * self.velma.T_W_E * self.T_E_O * T_O_Od3 * self.T_E_O.Inverse() * self.velma.T_W_E.Inverse() time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.02, max_v_r = 0.04) raw_input("Press ENTER to movie the wrist in " + str(time) + " s...") self.velma.moveWrist(T_B_Wd, time, Wrench(Vector3(40,40,40), Vector3(14,14,14)), path_tol=path_tol2) status, feedback = self.velma.waitForCartEnd() print "status", status if status.error_code != 0: print "unexpected high contact force", feedback exit(0) desired_push_dist = 5.0/k_high.force.z self.velma.updateTransformations() contact_B = self.velma.T_B_W * self.velma.T_W_E * self.T_E_O * self.key_endpoint_O contact_points_B.append(contact_B) # move along the spiral hole_found = False spiral = self.generateSpiral(search_radius, min_dist) for pt in spiral: pt_desired_O_in_ref = self.key_endpoint_O pt_contact_O_in_ref = self.T_E_O.Inverse() * self.velma.T_W_E.Inverse() * T_B_Wref.Inverse() * contact_B key_end_depth = PyKDL.dot(pt_contact_O_in_ref-pt_desired_O_in_ref, self.key_axis_O) # print "key_end_depth", key_end_depth T_O_Od4 = PyKDL.Frame(self.key_up_O*pt[0] + self.key_side_O*pt[1] + self.key_axis_O * (5.0/k_high.force.z + key_end_depth)) pt_desired_B = T_B_Wref * self.velma.T_W_E * self.T_E_O * T_O_Od4 * self.key_endpoint_O m_id = self.pub_marker.publishSinglePointMarker(pt_desired_B, m_id, r=0, g=1, b=0, a=1, namespace='default', frame_id='torso_base', m_type=Marker.SPHERE, scale=Vector3(0.003, 0.003, 0.003), T=None) T_B_Wd = T_B_Wref * self.velma.T_W_E * self.T_E_O * T_O_Od4 * self.T_E_O.Inverse() * self.velma.T_W_E.Inverse() time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.02, max_v_r = 0.04) raw_input("Press ENTER to movie the wrist in " + str(time) + " s...") self.velma.moveWrist(T_B_Wd, 
time, Wrench(Vector3(40,40,40), Vector3(14,14,14)), path_tol=path_tol) status, feedback = self.velma.waitForCartEnd() print "status", status self.velma.updateTransformations() contact_B = self.velma.T_B_W * self.velma.T_W_E * self.T_E_O * self.key_endpoint_O contact_points_B.append(contact_B) m_id = self.pub_marker.publishSinglePointMarker(contact_B, m_id, r=1, g=0, b=0, a=1, namespace='default', frame_id='torso_base', m_type=Marker.SPHERE, scale=Vector3(0.003, 0.003, 0.003), T=None) # check if the key is in the hole if status.error_code != 0: self.velma.updateTransformations() T_O_Od5 = PyKDL.Frame(self.key_axis_O * 5.0/k_high.force.z) T_B_Wd = self.velma.T_B_W * self.velma.T_W_E * self.T_E_O * T_O_Od5 * self.T_E_O.Inverse() * self.velma.T_W_E.Inverse() time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.02, max_v_r = 0.04) self.velma.moveWrist(T_B_Wd, time, Wrench(Vector3(40,40,40), Vector3(14,14,14)), path_tol=path_tol2) status, feedback = self.velma.waitForCartEnd() if status.error_code != 0: print "unexpected high contact force", feedback exit(0) self.velma.updateTransformations() T_O_Od6 = PyKDL.Frame(self.key_up_O*0.02 + self.key_axis_O * 5.0/k_high.force.z) T_B_Wd = self.velma.T_B_W * self.velma.T_W_E * self.T_E_O * T_O_Od6 * self.T_E_O.Inverse() * self.velma.T_W_E.Inverse() time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.02, max_v_r = 0.04) self.velma.moveWrist(T_B_Wd, time, Wrench(Vector3(40,40,40), Vector3(14,14,14)), path_tol=path_tol) status, feedback = self.velma.waitForCartEnd() if status.error_code != 0: print "key is in a hole" hole_found = True break print "hole_found", hole_found T_O_Od3 = PyKDL.Frame(self.key_axis_O * 5.0/k_high.force.z) self.velma.updateTransformations() T_B_Wref = self.velma.T_B_W print "moving the wrist to the current pose to reduce tension..." T_B_Wd = self.velma.T_B_W * self.velma.T_W_E * self.T_E_O * T_O_Od3 * self.T_E_O.Inverse() * self.velma.T_W_E.Inverse() time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.02, max_v_r = 0.04) raw_input("Press ENTER to movie the wrist in " + str(time) + " s...") self.velma.moveWrist(T_B_Wd, time, Wrench(Vector3(40,40,40), Vector3(14,14,14)), path_tol=path_tol2) status, feedback = self.velma.waitForCartEnd() print "status", status if status.error_code != 0: print "unexpected high contact force", feedback exit(0) print "setting stiffness to bigger value" self.velma.moveImpedance(k_hole_in, 1.0) if self.velma.checkStopCondition(1.1): exit(0) print "done." 
self.velma.updateTransformations() # calculate the fixed lock frame T_B_L # T_B_L the same orientation as the gripper (z axis pointing towards the door) # and the origin at the key endpoint at the initial penetration T_B_L = self.velma.T_B_W * self.velma.T_W_E * self.T_E_O * PyKDL.Frame(self.key_endpoint_O) penetration_axis_L = PyKDL.Vector(0, 0, 1) # get current contact point contact_B = self.velma.T_B_W * self.velma.T_W_E * self.T_E_O * self.key_endpoint_O contact_L = T_B_L.Inverse() * contact_B contact_max_depth_L = PyKDL.dot(contact_L, penetration_axis_L) contact_depth_L = contact_max_depth_L prev_contact_depth_L = contact_depth_L deepest_contact_L = copy.deepcopy(contact_L) m_id = self.pub_marker.publishSinglePointMarker(contact_B, m_id, r=1, g=0, b=0, a=1, namespace='default', frame_id='torso_base', m_type=Marker.SPHERE, scale=Vector3(0.003, 0.003, 0.003), T=None) central_point_L = PyKDL.Vector() force_key_axis = 17.0 force_key_up = 10.0 force_key_side = 10.0 xi = 7 yi = 7 explore_mode = "get_new" while True: # desired position of the key end self.velma.updateTransformations() T_B_Ocurrent = self.velma.T_B_W * self.velma.T_W_E * self.T_E_O * PyKDL.Frame(self.key_axis_O * (force_key_axis/k_hole_in.force.x)) diff_B = PyKDL.diff(T_B_Ocurrent, self.T_B_O_nearHole) rot_diff_Ocurrent = PyKDL.Frame(T_B_Ocurrent.Inverse().M) * diff_B.rot spiral_hole = self.generateSpiral(0.04, 0.005) T_B_W_shake = [] for pt in spiral_hole: T_B_Od_shake = T_B_Ocurrent * PyKDL.Frame(PyKDL.Rotation.RotZ(rot_diff_Ocurrent.z()-6.0/180.0*math.pi), self.key_up_O * pt[0] + self.key_side_O * pt[1]) T_B_W_shake.append(T_B_Od_shake * self.T_E_O.Inverse() * self.velma.T_W_E.Inverse()) T_B_Od_shake = T_B_Ocurrent * PyKDL.Frame(PyKDL.Rotation.RotZ(rot_diff_Ocurrent.z()+6.0/180.0*math.pi), self.key_up_O * pt[0] + self.key_side_O * pt[1]) T_B_W_shake.append(T_B_Od_shake * self.T_E_O.Inverse() * self.velma.T_W_E.Inverse()) print "shaking..." counter = 0 for T_B_W in T_B_W_shake: counter += 1 time = self.velma.getMovementTime(T_B_W, max_v_l = 0.4, max_v_r = 0.4) # self.velma.moveWrist2(T_B_W * self.velma.T_W_T) # raw_input("Press ENTER to movie the wrist in " + str(time) + " s...") self.velma.moveWrist(T_B_W, time, Wrench(Vector3(40,40,40), Vector3(14,14,14)), path_tol=path_tol_in) status1, feedback = self.velma.waitForCartEnd() print "status", status1 self.velma.updateTransformations() contact_B = self.velma.T_B_W * self.velma.T_W_E * self.T_E_O * self.key_endpoint_O m_id = self.pub_marker.publishSinglePointMarker(contact_B, m_id, r=1, g=0, b=0, a=1, namespace='default', frame_id='torso_base', m_type=Marker.SPHERE, scale=Vector3(0.003, 0.003, 0.003), T=None) pt_desired_B = T_B_W * self.velma.T_W_E * self.T_E_O * self.key_endpoint_O m_id = self.pub_marker.publishSinglePointMarker(pt_desired_B, m_id, r=0, g=1, b=0, a=1, namespace='default', frame_id='torso_base', m_type=Marker.SPHERE, scale=Vector3(0.003, 0.003, 0.003), T=None) contact_L = T_B_L.Inverse() * contact_B contact_depth_L = PyKDL.dot(contact_L, penetration_axis_L) if contact_depth_L > contact_max_depth_L+0.0001: deepest_contact_L = copy.deepcopy(contact_L) contact_max_depth_L = contact_depth_L print "contact_depth_L: %s"%(contact_depth_L) explore_mode = "push" break if status1.error_code != 0: break print "done." score = contact_depth_L - prev_contact_depth_L prev_contact_depth_L = contact_depth_L if status1.error_code != 0 or counter == len(T_B_W_shake): break print "moving the key to the current pose..." 
self.velma.updateTransformations() T_B_Wd = self.velma.T_B_W time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.1, max_v_r = 0.1) self.velma.moveWrist(T_B_Wd, time, Wrench(Vector3(40,40,40), Vector3(14,14,14)), path_tol=path_tol_in2) status1, feedback = self.velma.waitForCartEnd() print "status", status1 if status1.error_code != 0: exit(0) print "moving the key to the current pose..." self.velma.updateTransformations() T_B_Ocurrent = self.velma.T_B_W * self.velma.T_W_E * self.T_E_O T_B_Od = T_B_Ocurrent * PyKDL.Frame(self.key_axis_O * (-2.0/k_hole_in.force.x)) T_B_Wd = T_B_Od * self.T_E_O.Inverse() * self.velma.T_W_E.Inverse() time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.1, max_v_r = 0.1) raw_input("Press ENTER to movie the wrist in " + str(time) + " s...") self.velma.moveWrist(T_B_Wd, time, Wrench(Vector3(40,40,40), Vector3(14,14,14)), path_tol=path_tol_in) status1, feedback = self.velma.waitForCartEnd() print "status", status1 if status1.error_code != 0: exit(0) self.velma.moveHand([q_grasp[0]-q_pre, q_grasp[1]-q_pre, q_grasp[2]-q_pre, q_grasp[3]], hv, ht, 5000, True) rospy.sleep(2) print "setting stiffness to bigger value" self.velma.moveImpedance(k_high, 1.0) if self.velma.checkStopCondition(1.1): exit(0) print "done." T_B_Wd = T_B_Ocurrent * self.T_E_Orot.Inverse() * self.velma.T_W_E.Inverse() time = self.velma.getMovementTime(T_B_Wd, max_v_l = 0.02, max_v_r = 0.02) raw_input("Press ENTER to movie the wrist in " + str(time) + " s...") self.velma.moveWrist(T_B_Wd, time, Wrench(Vector3(40,40,40), Vector3(14,14,14)), path_tol=path_tol2) status1, feedback = self.velma.waitForCartEnd() print "status", status1 if status1.error_code != 0: exit(0) self.velma.moveHand([q_grasp[0]+q_pre, q_grasp[1]+q_pre, q_grasp[2]+q_pre, q_grasp[3]], hv, ht, 5000, True) rospy.sleep(5) # m_id = self.pub_marker.publishSinglePointMarker(T_B_L * PyKDL.Vector(0.01*xi+ori_map_vis_offset[0], 0.01*yi+ori_map_vis_offset[1], score), m_id, r=0, g=0, b=1, a=0.5, namespace='default', frame_id='torso_base', m_type=Marker.SPHERE, scale=Vector3(0.003, 0.003, 0.003), T=None) # m_id = self.pub_marker.publishSinglePointMarker(T_B_L * PyKDL.Vector(0.01*xi+ori_map_vis_offset[0], 0.01*yi+ori_map_vis_offset[1], ori_map[xi][yi][1]), m_id, r=0, g=1, b=0, a=0.5, namespace='default', frame_id='torso_base', m_type=Marker.SPHERE, scale=Vector3(0.003, 0.003, 0.003), T=None) exit(0)
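# ---------------------------------------------------------------------------
# Hedged sketch (explanatory, not part of the original script): the path_tol
# tuples defined earlier in this method divide a force/torque limit by the
# Cartesian stiffness.  With a linear impedance F = k * dx, a force limit
# F_max corresponds to an allowed positional deviation dx = F_max / k, which
# is what the controller's path tolerance expresses.  The helper name is
# illustrative; k is a geometry_msgs Wrench as used above.
# ---------------------------------------------------------------------------
import PyKDL

def path_tolerance(k, max_force, max_torque):
    """Allowed (position, orientation) deviation before the motion is aborted."""
    return (PyKDL.Vector(max_force / k.force.x,
                         max_force / k.force.y,
                         max_force / k.force.z),
            PyKDL.Vector(max_torque / k.torque.x,
                         max_torque / k.torque.y,
                         max_torque / k.torque.z))

# usage, equivalent to the inline tuples above:
#   path_tol = path_tolerance(k_high, 12.0, 12.0)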
def spin(self): if False: while not rospy.is_shutdown(): pt = PyKDL.Vector(random.uniform(-1, 1), random.uniform(-1, 1), 0) # line = (PyKDL.Vector(0,0,0), PyKDL.Vector(1,0,0)) line = (PyKDL.Vector(random.uniform(-1, 1),random.uniform(-1, 1),0), PyKDL.Vector(random.uniform(-1, 1),random.uniform(-1, 1),0)) line2 = (PyKDL.Vector(random.uniform(-1, 1),random.uniform(-1, 1),0), PyKDL.Vector(random.uniform(-1, 1),random.uniform(-1, 1),0)) # dist, p1, p2 = self.distanceLinePoint(line, pt) dist, p1, p2 = self.distanceLines(line, line2) m_id = 0 m_id = self.pub_marker.publishVectorMarker(line[0], line[1], m_id, 1, 0, 0, frame='world', namespace='default', scale=0.02) m_id = self.pub_marker.publishVectorMarker(line2[0], line2[1], m_id, 0, 1, 0, frame='world', namespace='default', scale=0.02) m_id = self.pub_marker.publishVectorMarker(p1, p2, m_id, 1, 1, 1, frame='world', namespace='default', scale=0.02) # print line, pt, dist print line, line2, dist raw_input(".") exit(0) simulation = True rospack = rospkg.RosPack() env_file=rospack.get_path('velma_scripts') + '/data/jar/cabinet_test.env.xml' srdf_path=rospack.get_path('velma_description') + '/robots/' print "creating interface for Velma..." # create the interface for Velma robot velma = Velma() print "done." ## velma.waitForInit() velma.fk_ik_solver.createSegmentToJointMap(velma.getJointStatesVectorNames(), velma.getInactiveJointStatesVectorNames()) q = velma.getJointStatesVector() iq = velma.getInactiveJointStatesVector() jac1 = PyKDL.Jacobian(len(q)) jac2 = PyKDL.Jacobian(len(q)) velma.fk_ik_solver.getJacobiansForPairX(jac1, jac2, "left_HandPalmLink", PyKDL.Vector(), "torso_link1", PyKDL.Vector(0.1,0,0), q, iq) print "jac1" print jac1 print "jac2" print jac2 exit(0) ## # # Initialise Openrave # openrave = openraveinstance.OpenraveInstance() openrave.startOpenraveURDF(env_file=env_file, viewer=True) openrave.readRobot(srdf_path=srdf_path) openrave.setCamera(PyKDL.Vector(2.0, 0.0, 2.0), PyKDL.Vector(0.60, 0.0, 1.10)) velma.waitForInit() openrave.updateRobotConfigurationRos(velma.js_pos) non_adj_links_ids = openrave.robot_rave.GetNonAdjacentLinks() velma.switchToJoint() lim_bo_soft, lim_up_soft = velma.getJointSoftLimitsVectors() lim_bo, lim_up = velma.getJointLimitsVectors() velma.fk_ik_solver.createJacobianFkSolvers('torso_base', 'right_HandPalmLink', velma.getJointStatesVectorNames()) velma.fk_ik_solver.createJacobianFkSolvers('torso_base', 'left_HandPalmLink', velma.getJointStatesVectorNames()) velma.fk_ik_solver.createSegmentToJointMap(velma.getJointStatesVectorNames(), velma.getInactiveJointStatesVectorNames()) print velma.getJointStatesVectorNames() r_HAND_targets = [ # PyKDL.Frame(PyKDL.Vector(0.5,0,1.8)), # PyKDL.Frame(PyKDL.Rotation.RotY(170.0/180.0*math.pi), PyKDL.Vector(0.5,0,1.6)), PyKDL.Frame(PyKDL.Rotation.RotY(90.0/180.0*math.pi), PyKDL.Vector(0.2,0.0,1.0)), PyKDL.Frame(PyKDL.Rotation.RotY(90.0/180.0*math.pi), PyKDL.Vector(0.2,-0.5,1.0)), ] l_HAND_targets = [ # PyKDL.Frame(PyKDL.Vector(0.5,0,1.8)), # PyKDL.Frame(PyKDL.Rotation.RotY(170.0/180.0*math.pi), PyKDL.Vector(0.5,0,1.6)), PyKDL.Frame(PyKDL.Rotation.RotY(90.0/180.0*math.pi) * PyKDL.Rotation.RotZ(180.0/180.0*math.pi), PyKDL.Vector(0.2,0.0,1.0)), PyKDL.Frame(PyKDL.Rotation.RotY(90.0/180.0*math.pi) * PyKDL.Rotation.RotZ(180.0/180.0*math.pi), PyKDL.Vector(0.2,0.5,1.0)), ] target_idx = 0 r_HAND_target = r_HAND_targets[target_idx] l_HAND_target = l_HAND_targets[target_idx] target_idx += 1 last_time = rospy.Time.now() q = velma.getJointStatesVector() q_names = 
velma.getJointStatesVectorNames() iq = velma.getInactiveJointStatesVector() counter = 0 while not rospy.is_shutdown(): if counter > 300: r_HAND_target = r_HAND_targets[target_idx] l_HAND_target = l_HAND_targets[target_idx] target_idx = (target_idx + 1)%len(r_HAND_targets) counter = 0 counter += 1 time_elapsed = rospy.Time.now() - last_time J_JLC = np.matrix(numpy.zeros( (len(q), len(q)) )) delta_V_JLC = np.empty(len(q)) for q_idx in range(len(q)): if q[q_idx] < lim_bo_soft[q_idx]: delta_V_JLC[q_idx] = q[q_idx] - lim_bo_soft[q_idx] J_JLC[q_idx,q_idx] = min(1.0, 10*abs(q[q_idx] - lim_bo_soft[q_idx]) / abs(lim_bo[q_idx] - lim_bo_soft[q_idx])) elif q[q_idx] > lim_up_soft[q_idx]: delta_V_JLC[q_idx] = q[q_idx] - lim_up_soft[q_idx] J_JLC[q_idx,q_idx] = min(1.0, 10*abs(q[q_idx] - lim_up_soft[q_idx]) / abs(lim_up[q_idx] - lim_up_soft[q_idx])) else: delta_V_JLC[q_idx] = 0.0 J_JLC[q_idx,q_idx] = 0.0 J_JLC_inv = np.linalg.pinv(J_JLC) N_JLC = identityMatrix(len(q)) - (J_JLC_inv * J_JLC) N_JLC_inv = np.linalg.pinv(N_JLC) v_max_JLC = 20.0/180.0*math.pi kp_JLC = 1.0 dx_JLC_des = kp_JLC * delta_V_JLC # min(1.0, v_max_JLC/np.linalg.norm(dx_JLC_des)) if v_max_JLC > np.linalg.norm(dx_JLC_des): vv_JLC = 1.0 else: vv_JLC = v_max_JLC/np.linalg.norm(dx_JLC_des) dx_JLC_ref = - vv_JLC * dx_JLC_des # right hand J_r_HAND = velma.fk_ik_solver.getJacobian('torso_base', 'right_HandPalmLink', q) J_r_HAND_inv = np.linalg.pinv(J_r_HAND) N_r_HAND = identityMatrix(len(q)) - (J_r_HAND_inv * J_r_HAND) T_B_E = velma.fk_ik_solver.calculateFk2('torso_base', 'right_HandPalmLink', q) r_HAND_current = T_B_E r_HAND_diff = PyKDL.diff(r_HAND_target, r_HAND_current) delta_V_HAND = np.empty(6) delta_V_HAND[0] = r_HAND_diff.vel[0] delta_V_HAND[1] = r_HAND_diff.vel[1] delta_V_HAND[2] = r_HAND_diff.vel[2] delta_V_HAND[3] = r_HAND_diff.rot[0] delta_V_HAND[4] = r_HAND_diff.rot[1] delta_V_HAND[5] = r_HAND_diff.rot[2] v_max_HAND = 2.0 kp_HAND = 2.0 dx_HAND_des = kp_HAND * delta_V_HAND if v_max_HAND > np.linalg.norm(dx_HAND_des): vv_HAND = 1.0 else: vv_HAND = v_max_HAND/np.linalg.norm(dx_HAND_des) dx_r_HAND_ref = - vv_HAND * dx_HAND_des # left hand J_l_HAND = velma.fk_ik_solver.getJacobian('torso_base', 'left_HandPalmLink', q) J_l_HAND_inv = np.linalg.pinv(J_l_HAND) N_l_HAND = identityMatrix(len(q)) - (J_l_HAND_inv * J_l_HAND) T_B_E = velma.fk_ik_solver.calculateFk2('torso_base', 'left_HandPalmLink', q) l_HAND_current = T_B_E l_HAND_diff = PyKDL.diff(l_HAND_target, l_HAND_current) delta_V_HAND = np.empty(6) delta_V_HAND[0] = l_HAND_diff.vel[0] delta_V_HAND[1] = l_HAND_diff.vel[1] delta_V_HAND[2] = l_HAND_diff.vel[2] delta_V_HAND[3] = l_HAND_diff.rot[0] delta_V_HAND[4] = l_HAND_diff.rot[1] delta_V_HAND[5] = l_HAND_diff.rot[2] v_max_HAND = 2.0 kp_HAND = 2.0 dx_HAND_des = kp_HAND * delta_V_HAND if v_max_HAND > np.linalg.norm(dx_HAND_des): vv_HAND = 1.0 else: vv_HAND = v_max_HAND/np.linalg.norm(dx_HAND_des) dx_l_HAND_ref = - vv_HAND * dx_HAND_des link_collision_map = {} openrave.updateRobotConfiguration(q, velma.getJointStatesVectorNames()) if True: openrave.switchCollisionModel("velmasimplified1") col_chk = openrave.env.GetCollisionChecker() col_opt = col_chk.GetCollisionOptions() col_chk.SetCollisionOptions(0x04) # CO_Contacts(0x04), CO_AllLinkCollisions(0x20) total_contacts = 0 for link1_idx, link2_idx in non_adj_links_ids: link1 = openrave.robot_rave.GetLinks()[link1_idx] link2 = openrave.robot_rave.GetLinks()[link2_idx] report = CollisionReport() if col_chk.CheckCollision(link1=link1, link2=link2, report=report): T_L1_W = 
conv.OpenraveToKDL(link1.GetTransform()).Inverse() TR_L1_W = PyKDL.Frame(T_L1_W.M) T_L2_W = conv.OpenraveToKDL(link2.GetTransform()).Inverse() TR_L2_W = PyKDL.Frame(T_L2_W.M) swapped = False if link1_idx > link2_idx: link1_idx, link2_idx = link2_idx, link1_idx swapped = True if not (link1_idx, link2_idx) in link_collision_map: link_collision_map[(link1_idx, link2_idx)] = [] for contact in report.contacts: pos_W = PyKDL.Vector(contact.pos[0], contact.pos[1], contact.pos[2]) norm_W = PyKDL.Vector(contact.norm[0], contact.norm[1], contact.norm[2]) if swapped: link_collision_map[(link1_idx, link2_idx)].append( (pos_W, -norm_W, T_L2_W * pos_W, T_L1_W * pos_W, TR_L2_W * (-norm_W), TR_L1_W * (-norm_W), contact.depth) ) else: link_collision_map[(link1_idx, link2_idx)].append( (pos_W, norm_W, T_L1_W * pos_W, T_L2_W * pos_W, TR_L1_W * norm_W, TR_L2_W * norm_W, contact.depth) ) total_contacts += len(report.contacts) col_chk.SetCollisionOptions(col_opt) print "links in contact:", len(link_collision_map), "total contacts:", total_contacts omega_col = np.matrix(np.zeros( (len(q),1) )) Ncol = identityMatrix(len(q)) for link1_idx, link2_idx in link_collision_map: link1_name = openrave.robot_rave.GetLinks()[link1_idx].GetName() link2_name = openrave.robot_rave.GetLinks()[link2_idx].GetName() # l1_parent = velma.fk_ik_solver.isParent(link1_name, link2_name) # l2_parent = velma.fk_ik_solver.isParent(link2_name, link1_name) affected_dof = velma.fk_ik_solver.getAffectedDof(link1_name, link2_name) # print "affected dof:" # for dof_idx in affected_dof: # print q_names[dof_idx] contacts = link_collision_map[ (link1_idx, link2_idx) ] for c in contacts: pos_W, norm_W, pos_L1, pos_L2, norm_L1, norm_L2, depth = c m_id = self.pub_marker.publishVectorMarker(pos_W, pos_W + norm_W*0.05, 1, 1, 0, 0, frame='world', namespace='default', scale=0.005) if depth < 0: print "ERROR: depth < 0:", depth exit(0) # print link1_name, link2_name jac1 = PyKDL.Jacobian(len(q)) velma.fk_ik_solver.getJacobianForX(jac1, link1_name, pos_L1, q, iq) jac2 = PyKDL.Jacobian(len(q)) velma.fk_ik_solver.getJacobianForX(jac2, link2_name, pos_L2, q, iq) # repulsive velocity V_max = 0.1 depth_max = 0.002 if depth > depth_max: depth = depth_max Vrep = V_max * depth * depth / (depth_max * depth_max) # the mapping between motions along contact normal and the Cartesian coordinates e1 = norm_L1 e2 = norm_L2 Jd1 = np.matrix([e1[0], e1[1], e1[2]]) Jd2 = np.matrix([e2[0], e2[1], e2[2]]) # print "Jd1.shape", Jd1.shape # rewrite the linear part of the jacobian jac1_mx = np.matrix(np.zeros( (3, len(q)) )) jac2_mx = np.matrix(np.zeros( (3, len(q)) )) for q_idx in range(len(q)): col1 = jac1.getColumn(q_idx) col2 = jac2.getColumn(q_idx) for row_idx in range(3): jac1_mx[row_idx, q_idx] = col1[row_idx] jac2_mx[row_idx, q_idx] = col2[row_idx] # print "jac1_mx, jac2_mx" # print jac1_mx # print jac2_mx # print "jac1_mx.shape", jac1_mx.shape Jcol1 = Jd1 * jac1_mx Jcol2 = Jd2 * jac2_mx # print "Jcol2.shape", Jcol2.shape Jcol = np.matrix(np.zeros( (2, len(q)) )) for q_idx in range(len(q)): if Jcol1[0, q_idx] < 0.000000001 or not q_idx in affected_dof:#l1_parent: Jcol1[0, q_idx] = 0.0 if Jcol2[0, q_idx] < 0.000000001 or not q_idx in affected_dof:#l2_parent: Jcol2[0, q_idx] = 0.0 Jcol[0, q_idx] = Jcol1[0, q_idx] Jcol[1, q_idx] = Jcol2[0, q_idx] # print Jcol.shape # print "Jcol" # print Jcol Jcol_pinv = np.linalg.pinv(Jcol) # Jcol_pinv = Jcol.transpose() # print "Jcol_pinv" # print Jcol_pinv # Ncol1 = identityMatrix(len(q)) - np.linalg.pinv(Jcol1) * Jcol1 # Ncol2 = 
identityMatrix(len(q)) - np.linalg.pinv(Jcol2) * Jcol2 # Ncol = Ncol * Ncol1 # Ncol = Ncol * Ncol2 # omega_col += np.linalg.pinv(Jcol1) * (-Vrep) # omega_col += np.linalg.pinv(Jcol2) * (Vrep) # continue # activation = min(1.0, depth/0.001) # a_des = np.matrix(np.zeros( (len(q),len(q)) )) # a_des[0,0] = a_des[1,1] = 1.0#activation # U, S, V = numpy.linalg.svd(Jcol, full_matrices=True, compute_uv=True) # print "V" # print V # print "S" # print S Ncol12 = identityMatrix(len(q)) - Jcol_pinv * Jcol # Ncol12 = identityMatrix(len(q)) - Jcol.transpose() * (Jcol_pinv).transpose() # Ncol12 = identityMatrix(len(q)) - (V * a_des * V.transpose()) # Ncol12 = identityMatrix(len(q)) - (Jcol.transpose() * a_des * Jcol) Ncol = Ncol * Ncol12 Vrep1 = -Vrep Vrep2 = Vrep # if l1_parent: # Vrep1 = 0.0 # if l2_parent: # Vrep2 = 0.0 d_omega = Jcol_pinv * np.matrix([-Vrep, Vrep]).transpose() # print "d_omega", d_omega # print "Vrep", Vrep # print q_names # print "Jcol", Jcol # print "Jcol_pinv", Jcol_pinv # print "Jcol_pinv * Jcol", Jcol_pinv * Jcol # print "Jcol * Jcol_pinv", Jcol * Jcol_pinv # print "a_des", a_des omega_col += d_omega # print "depth", depth # raw_input(".") # print "omega_col", omega_col # print dx_HAND_ref omega_r_HAND = (J_r_HAND_inv * np.matrix(dx_r_HAND_ref).transpose()) omega_l_HAND = (J_l_HAND_inv * np.matrix(dx_l_HAND_ref).transpose()) Ncol_inv = np.linalg.pinv(Ncol) N_r_HAND_inv = np.linalg.pinv(N_r_HAND) # omega = J_JLC_inv * np.matrix(dx_JLC_ref).transpose() + N_JLC_inv * (omega_col + Ncol_inv * (omega_r_HAND))# + N_r_HAND_inv * omega_l_HAND)) omega = J_JLC_inv * np.matrix(dx_JLC_ref).transpose() + N_JLC.transpose() * (omega_col + Ncol.transpose() * (omega_r_HAND))# + N_r_HAND.transpose() * omega_l_HAND)) # print "omega", omega # print "dx_JLC_ref" # print dx_JLC_ref # print "dx_HAND_ref" # print dx_HAND_ref omega_vector = np.empty(len(q)) for q_idx in range(len(q)): omega_vector[q_idx] = omega[q_idx][0] q += omega_vector * 0.002 if time_elapsed.to_sec() > 0.2: last_time = rospy.Time.now() velma.moveJoint(q, velma.getJointStatesVectorNames(), 0.05, start_time=0.14)
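# NOTE (illustrative sketch, not part of the original script): the joint
# velocity command assembled in the loop above follows a prioritized,
# nullspace-projected scheme: the joint-limit task is executed directly and
# each lower-priority task (collision avoidance, hand pose) is projected into
# the nullspace of the tasks above it.  A minimal numpy-only sketch of that
# composition with toy Jacobians (not the robot's) is given below.
#
#   import numpy as np
#
#   n = 4                                       # number of joints (toy value)
#   J1 = np.matrix(np.random.rand(2, n))        # primary task Jacobian (assumed)
#   J2 = np.matrix(np.random.rand(3, n))        # secondary task Jacobian (assumed)
#   dx1 = np.matrix(np.random.rand(2, 1))       # primary task velocity
#   dx2 = np.matrix(np.random.rand(3, 1))       # secondary task velocity
#
#   J1_pinv = np.linalg.pinv(J1)
#   N1 = np.matrix(np.identity(n)) - J1_pinv * J1      # nullspace projector of task 1
#   dq = J1_pinv * dx1 + N1 * (np.linalg.pinv(J2) * dx2)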
    def spin(self):
        # create the jar model
        jar = Jar(self.pub_marker)
        # start thread for jar tf publishing and for visualization
        thread.start_new_thread(jar.tfBroadcasterLoop, (0.5, 1))

        # look for jar marker
        T_B_J = self.getJarMarkerPose()
        if T_B_J == None:
            print "Could not find jar marker."
            exit(0)

        # jar marker is found, add observation to the jar model
        print "Found jar marker."
        jar.addMarkerObservation(T_B_J)

        # calculate angle between jar_axis and vertical vector (z) in B
        jar_up_angle = 180.0*getAngle(PyKDL.Frame(T_B_J.M)*PyKDL.Vector(0,0,1), PyKDL.Vector(0,0,1))/math.pi
        print "angle between jar_axis and vertical vector (z) in B: %s deg."%(jar_up_angle)

        # stiffness for jar touching
        k_jar_touching = Wrench(Vector3(1200.0, 1200.0, 1200.0), Vector3(300.0, 300.0, 300.0))
        k_jar_cap_gripping = Wrench(Vector3(1200.0, 1200.0, 1200.0), Vector3(300.0, 300.0, 300.0))
        k_jar_cap_rotating = Wrench(Vector3(100.0, 100.0, 1200.0), Vector3(300.0, 300.0, 300.0))

        # create the robot interface
        velma = Velma()
        velma.updateTransformations()

        # reset the gripper
        self.resetGripper(velma)

        # start with very low stiffness
        print "setting stiffness to very low value"
        velma.moveImpedance(velma.k_error, 0.5)
        if velma.checkStopCondition(0.5):
            exit(0)

        raw_input("Press Enter to continue...")
        if velma.checkStopCondition():
            exit(0)

        velma.updateTransformations()
        velma.updateAndMoveTool( velma.T_W_E, 1.0 )
        if velma.checkStopCondition(1.0):
            exit(0)

        raw_input("Press Enter to continue...")
        print "setting stiffness to bigger value"
        velma.moveImpedance(k_jar_touching, 3.0)
        if velma.checkStopCondition(3.0):
            exit(0)

        if False:
            velma.moveAroundQ5Singularity(5.0/180.0*math.pi)
            print "aborted_on_q5_singularity: %s"%(velma.aborted_on_q5_singularity)
            exit(0)

        if False:
            velma.updateTransformations()
            i = 6
            omega = 5.0/180.0*math.pi
            if velma.q_r[i] > 0.0:
                angle = -0.0/180.0*math.pi
                omega = -math.fabs(omega)
            else:
                angle = 0.0/180.0*math.pi
                omega = math.fabs(omega)
            traj, times = velma.generateTrajectoryInJoint(i, -velma.q_r[i]+angle, omega)
            velma.moveWrist2(traj[-1]*velma.T_W_T)
            raw_input("Press Enter to move the robot in " + str(times[-1]) + " s...")
            if velma.checkStopCondition():
                exit(0)
            velma.moveWristTraj(traj, times, Wrench(Vector3(20,20,20), Vector3(4,4,4)), abort_on_q5_singularity=False)
            velma.checkStopCondition(times[-1]+0.5)
            exit(0)

        velma.calibrateTactileSensors()

        print "generating grasps..."
# calculate the best grip for the jar cap jarCapForceGrasps = self.generateGrasps_jarCapForce(velma, jar, 70.0/180.0*math.pi) print "generated %s grasps: %s"%(len(jarCapForceGrasps), jarCapForceGrasps[0].name) # calculate the best grip for the jar side touching jarCapPinchGrasps = self.generateGrasps_jarCapPinch(velma, jar) print "generated %s grasps: %s"%(len(jarCapPinchGrasps), jarCapPinchGrasps[0].name) # calculate the best grip for the jar top touching jarCapTopTouchGrasps = self.GenerateGrasps_jarCapTopTouch(velma) print "generated %s grasps: %s"%(len(jarCapTopTouchGrasps), jarCapTopTouchGrasps[0].name) self.grasps = jarCapForceGrasps + jarCapPinchGrasps + jarCapTopTouchGrasps print "Total %s grasps"%(len(self.grasps)) # get contact points observations # we want to touch the jar's cap with the middle of the 5th row of tactile matrix if True: actions = [] actions.append(JarLidTopTouchAction(velma)) actions.append(JarLidPinchGraspAction(velma)) actions.append(RemoveJarLidAction(velma)) actions.append(RemoveJarLidAction(velma)) actions.append(RemoveJarLidAction(velma)) actions.append(RemoveJarLidAction(velma)) actions.append(RemoveJarLidAction(velma)) self.resetContacts() jar.resetContactObservations() action_index = 0 last_k = copy.deepcopy(k_jar_touching) last_q = [0.0, 0.0, 0.0, 0.0] for action in actions: # get the fresh pose of the jar T_B_JC = copy.deepcopy(jar.getJarCapFrame()) action.setTarget(T_B_JC) stop = False action.recalculateRelations(self.grasps) rel_index = 0 while not stop: rel = action.relations[rel_index] print "rel: %s"%(rel.name) if rel.grasp.q_pre != None and (last_q[0] != rel.grasp.q_pre[0] or last_q[1] != rel.grasp.q_pre[1] or last_q[2] != rel.grasp.q_pre[2] or last_q[3] != rel.grasp.q_pre[3]): last_q = copy.deepcopy(rel.grasp.q_pre) velma.move_hand_client("right", rel.grasp.q_pre, t=rel.grasp.t_pre ) if velma.checkStopCondition(3.0): exit(0) if rel.k_pre != None and (last_k.force.x != rel.k_pre.force.x or last_k.force.y != rel.k_pre.force.y or last_k.force.z != rel.k_pre.force.z or last_k.torque.x != rel.k_pre.torque.x or last_k.torque.y != rel.k_pre.torque.y or last_k.torque.z != rel.k_pre.torque.z): last_k = copy.deepcopy(rel.k_pre) print "moveImpedance(%s)"%(rel.k_pre) velma.moveImpedance(rel.k_pre, 2.0) if velma.checkStopCondition(2.0): exit(0) T_B_Wd = action.T_B_Go * rel.T_Go_Ge * rel.grasp.T_Ge_E * velma.T_E_W duration = velma.getMovementTime(T_B_Wd, max_v_l = rel.max_v_l, max_v_r = rel.max_v_r) velma.moveWrist2(T_B_Wd*velma.T_W_T) raw_input("Press Enter to move the robot in " + str(duration) + " s...") if velma.checkStopCondition(): exit(0) velma.moveWrist(T_B_Wd, duration, Wrench(Vector3(20,20,20), Vector3(4,4,4)), abort_on_q5_singularity=True, abort_on_q5_q6_self_collision=True) if True in rel.stop_on_contact: # wait for contact contacts = velma.waitForFirstContact(80, duration, emergency_stop=True, f1=True, f2=True, f3=True, palm=False) if rel.collect_contacts: if len(contacts) < 1: return for c in contacts: self.addContact(c) # print "found contact point" else: if rel.collect_contacts: end_time = rospy.Time.now() + rospy.Duration(duration) while rospy.Time.now() < end_time: contacts = velma.getContactPoints(200, f1=True, f2=True, f3=True, palm=False) for c in contacts: self.addContact(c) if velma.checkStopCondition(0.01): exit(0) if velma.aborted_on_q5_singularity: break else: if velma.checkStopCondition(duration): exit(0) aborted = False if velma.aborted_on_q5_singularity: print "moving away from singularity in q_5..." 
velma.moveAwayQ5Singularity(20.0/180.0*math.pi, T_B_Wd) aborted = True if velma.aborted_on_q5_q6_self_collision: print "moving away from self collision in q_5 and q_6..." closest_sector = velma.getClosestQ5Q6SpaceSector(velma.q_r[5], velma.q_r[6]) print "closest sector: %s"%(closest_sector) q5 = (velma.q5_q6_restricted_area[closest_sector][0]+velma.q5_q6_restricted_area[closest_sector][1])/2.0 q6 = (velma.q5_q6_restricted_area[closest_sector][2]+velma.q5_q6_restricted_area[closest_sector][3])/2.0 print "q_5: %s q_6: %s"%(q5,q6) traj, times = velma.generateTrajectoryInQ5Q6(q5,q6,5.0/180.0*math.pi) velma.moveWrist2(traj[-1]*velma.T_W_T) raw_input("Press Enter to move the robot in " + str(times[-1]) + " s...") if velma.checkStopCondition(): exit(0) velma.moveWristTraj(traj, times, Wrench(Vector3(20,20,20), Vector3(4,4,4)), abort_on_q5_singularity=False) velma.checkStopCondition(times[-1]+0.5) aborted = True if aborted: next_actions = rel.on_failure.split(',') else: next_actions = rel.on_success.split(',') if rel.grasp.q != None and (last_q[0] != rel.grasp.q[0] or last_q[1] != rel.grasp.q[1] or last_q[2] != rel.grasp.q[2] or last_q[3] != rel.grasp.q[3]): last_q = copy.deepcopy(rel.grasp.q) velma.move_hand_client("right", rel.grasp.q, t=rel.grasp.t ) if rel.collect_contacts: end_time = rospy.Time.now() + rospy.Duration(3.0) while rospy.Time.now() < end_time: contacts = velma.getContactPoints(200, f1=True, f2=True, f3=True, palm=False) if velma.checkStopCondition(0.01): exit(0) for c in contacts: self.addContact(c) else: if velma.checkStopCondition(3.0): exit(0) if rel.k_post != None and (last_k.force.x != rel.k_post.force.x or last_k.force.y != rel.k_post.force.y or last_k.force.z != rel.k_post.force.z or last_k.torque.x != rel.k_post.torque.x or last_k.torque.y != rel.k_post.torque.y or last_k.torque.z != rel.k_post.torque.z): last_k = copy.deepcopy(rel.k_post) print "moveImpedance(%s)"%(rel.k_post) velma.moveImpedance(rel.k_post, 2.0) if velma.checkStopCondition(2.0): exit(0) print "aborted %s next_actions: %s"%(aborted, next_actions) for next in next_actions: if next == "RECALCULATE": action.recalculateRelations(self.grasps) elif next == "FAILURE": exit(0) elif next == "SUCCESS": if rel.grasp.q != None and (last_q[0] != rel.grasp.q[0] or last_q[1] != rel.grasp.q[1] or last_q[2] != rel.grasp.q[2] or last_q[3] != rel.grasp.q[3]): last_q = copy.deepcopy(rel.grasp.q) velma.move_hand_client("right", rel.grasp.q, t=rel.grasp.t) if rel.collect_contacts: end_time = rospy.Time.now() + rospy.Duration(3.0) while rospy.Time.now() < end_time: contacts = velma.getContactPoints(200, f1=True, f2=True, f3=True, palm=False) if velma.checkStopCondition(0.01): exit(0) for c in contacts: self.addContact(c) else: if velma.checkStopCondition(3.0): exit(0) if rel.k_post != None and (last_k.force.x != rel.k_post.force.x or last_k.force.y != rel.k_post.force.y or last_k.force.z != rel.k_post.force.z or last_k.torque.x != rel.k_post.torque.x or last_k.torque.y != rel.k_post.torque.y or last_k.torque.z != rel.k_post.torque.z): last_k = copy.deepcopy(rel.k_post) velma.moveImpedance(rel.k_post, 2.0) if velma.checkStopCondition(2.0): exit(0) stop = True break elif action.getRelIndex(next) >= 0: rel_index = action.getRelIndex(next) break else: print "error: could not find relation: %s"%(next) exit(0) if action_index == 0: for c in self.contacts: jar.addContactObservation(c) jar.processContactObservationsForTop() jar.drawContactObservations() self.resetContacts() jar.resetContactObservations() elif action_index == 1: 
used_points = [False for c in self.contacts] index = 0 for c in self.contacts: if used_points[index]: index += 1 continue mean_point = PyKDL.Vector() mean_count = 0 index_2 = 0 for d in self.contacts: if (c-d).Norm() < 0.02: mean_point += d mean_count += 1 used_points[index_2] = True index_2 += 1 if mean_count > 0: jar.addContactObservation(mean_point*(1.0/mean_count)) # now, we have very good pose of the jar jar.processContactObservations() jar.drawContactObservations() else: contacts = velma.getContactPoints(200, f1=True, f2=True, f3=True, palm=False) if len(contacts) > 0: print "the jar is opened" break for c in self.contacts: jar.addContactObservation(c) jar.processContactObservationsForTop(top_offset=-0.02) jar.drawContactObservations() jar.resetContactObservations() used_points = [False for c in self.contacts] index = 0 for c in self.contacts: if used_points[index]: index += 1 continue mean_point = PyKDL.Vector() mean_count = 0 index_2 = 0 for d in self.contacts: if (c-d).Norm() < 0.02: mean_point += d mean_count += 1 used_points[index_2] = True index_2 += 1 if mean_count > 0: jar.addContactObservation(mean_point*(1.0/mean_count)) # now, we have very good pose of the jar jar.processContactObservations() # jar.drawContactObservations() action_index += 1 # jar.drawContactObservations() rospy.sleep(2.0) exit(0)
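# NOTE (illustrative sketch, not part of the original script): the block above
# reduces the raw tactile contacts to cluster means: every contact within 2 cm
# of a seed point is averaged into a single observation before it is added to
# the jar model.  The same greedy averaging, extracted into a hypothetical
# stand-alone helper, could look like this:
#
#   import PyKDL
#
#   def cluster_contacts(points, radius=0.02):
#       used = [False] * len(points)
#       means = []
#       for i, c in enumerate(points):
#           if used[i]:
#               continue
#           mean_point = PyKDL.Vector()
#           count = 0
#           for j, d in enumerate(points):
#               if (c - d).Norm() < radius:
#                   mean_point += d
#                   count += 1
#                   used[j] = True
#           means.append(mean_point * (1.0 / count))
#       return means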
def spin(self): print "creating interface for Velma..." # create the interface for Velma robot velma = Velma() rospy.sleep(0.5) print "done." hv = [1.2, 1.2, 1.2, 1.2] ht = [3000, 3000, 3000, 3000] velma.moveHandLeft([15.0/180.0*math.pi, 15.0/180.0*math.pi, 15.0/180.0*math.pi, 0], hv, ht, 5000, True) print "wainting for moveHandLeft complete..." result = velma.waitForHandLeft() print "result:", result.error_code velma.moveHandRight([15.0/180.0*math.pi, 15.0/180.0*math.pi, 15.0/180.0*math.pi, 0], hv, ht, 5000, True) print "wainting for moveHandRight complete..." result = velma.waitForHandRight() print "result:", result.error_code velma.moveHandLeft([70.0/180.0*math.pi, 70.0/180.0*math.pi, 70.0/180.0*math.pi, 0], hv, ht, 5000, True) print "wainting for moveHandLeft complete..." result = velma.waitForHandLeft() print "result:", result.error_code velma.switchToCart() velma.updateTransformations() T_B_Wr1 = velma.T_B_Wr T_B_Wr2 = PyKDL.Frame(PyKDL.Vector(0.2,0,0)) * T_B_Wr1 velma.moveWristRight(T_B_Wr2, 3.0, Wrench(Vector3(40,40,40), Vector3(14,14,14)), start_time=0.1) print "wainting for moveWristRight complete..." result = velma.waitForWristRight() print "result:", result.error_code velma.moveWristRight(T_B_Wr1, 3.0, Wrench(Vector3(40,40,40), Vector3(14,14,14)), start_time=0.1) print "wainting for moveWristRight complete..." result = velma.waitForWristRight() print "result:", result.error_code velma.updateTransformations() T_B_Wl1 = velma.T_B_Wl T_B_Wl2 = PyKDL.Frame(PyKDL.Vector(0,0,0.2)) * T_B_Wl1 velma.moveWristLeft(T_B_Wl2, 3.0, Wrench(Vector3(40,40,40), Vector3(14,14,14)), start_time=0.1) print "wainting for moveWristLeft complete..." result = velma.waitForWristLeft() print "result:", result.error_code velma.moveWristLeft(T_B_Wl1, 3.0, Wrench(Vector3(40,40,40), Vector3(14,14,14)), start_time=0.1) print "wainting for moveWristLeft complete..." result = velma.waitForWristLeft() print "result:", result.error_code raw_input("Press ENTER to exit...")
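# NOTE (illustrative sketch, not part of the original script): the class below
# scores head/arm configurations by casting rays from the Kinect optical frame
# towards target spheres and rejecting targets outside the camera field of
# view.  The field-of-view test reduces to the angle between the camera's
# optical axis and the direction to the target; a minimal PyKDL version of
# such an angle helper (comparable in role to velmautils.getAngle) is sketched
# below.
#
#   import math
#   import PyKDL
#
#   def angle_between(v1, v2):
#       # angle between two vectors via cross and dot products
#       return math.atan2((v1 * v2).Norm(), PyKDL.dot(v1, v2))
#
#   # e.g. a target is inside a 30 deg half-angle FOV if
#   # angle_between(PyKDL.Vector(0, 0, 1), pos_in_camera_frame) <= 30.0 / 180.0 * math.pi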
class TestOrOctomap: """ """ def KDLToOpenrave(self, T): ret = numpy.array([ [T.M[0,0], T.M[0,1], T.M[0,2], T.p.x()], [T.M[1,0], T.M[1,1], T.M[1,2], T.p.y()], [T.M[2,0], T.M[2,1], T.M[2,2], T.p.z()], [0, 0, 0, 1]]) return ret def OpenraveToKDL(self, T): rot = PyKDL.Rotation(T[0][0],T[0][1],T[0][2],T[1][0],T[1][1],T[1][2],T[2][0],T[2][1],T[2][2]) pos = PyKDL.Vector(T[0][3], T[1][3], T[2][3]) return PyKDL.Frame(rot, pos) def __init__(self): self.pub_marker = velmautils.MarkerPublisher() def planVis(self, openrave): with openrave.env: debug = True m_id = 0 if debug: self.pub_marker.eraseMarkers(0,3000, frame_id='world') rospy.sleep(0.01) kinect_fov = 30.0/180.0*math.pi # target: key pocket vis_targets = [ ("vis_target_0", 0.1, PyKDL.Vector(0, -0.4, 1.0)), ("vis_target_1", 0.1, PyKDL.Vector(0.1, -0.4, 1.0)), ("vis_target_2", 0.1, PyKDL.Vector(0.1, -0.5, 1.0)), ("vis_target_3", 0.1, PyKDL.Vector(0, -0.5, 1.0)), ("vis_target_4", 0.1, PyKDL.Vector(0.05, -0.45, 1.0)), ] vis_bodies = [] # target: test (vertical axis at the door plane) # vis_targets = [ # ("vis_target_0", 0.1, PyKDL.Vector(1, 0.0, 1.2)), # ("vis_target_1", 0.1, PyKDL.Vector(1, 0.0, 1.3)), # ("vis_target_2", 0.1, PyKDL.Vector(1, 0.0, 1.4)), # ("vis_target_3", 0.1, PyKDL.Vector(1, 0.0, 1.5)), # ("vis_target_4", 0.1, PyKDL.Vector(1, 0.0, 1.6)), # ] for (name, diam, pos) in vis_targets: if debug: m_id = self.pub_marker.publishSinglePointMarker(pos, m_id, r=0, g=1, b=0, a=1, namespace='default', frame_id='world', m_type=Marker.SPHERE, scale=Vector3(diam, diam, diam), T=None) rospy.sleep(0.01) body = openrave.addSphere(name, diam) body.SetTransform(self.KDLToOpenrave(PyKDL.Frame(pos))) vis_bodies.append( body ) openrave.env.Remove( body ) def getSightAngle(openrave, q=None, dof_indices=None): if q != None and dof_indices != None: current_q = openrave.robot_rave.GetDOFValues(dof_indices) openrave.robot_rave.SetDOFValues(q, dof_indices) openrave.env.UpdatePublishedBodies() T_W_C = self.OpenraveToKDL(openrave.robot_rave.GetLink("head_kinect_rgb_optical_frame").GetTransform()) T_C_W = T_W_C.Inverse() angle_sum = 0.0 for (name, size, pos_W) in vis_targets: pos_C = T_C_W * pos_W angle_sum += velmautils.getAngle(pos_C, PyKDL.Vector(0,0,1)) if q != None and dof_indices != None: openrave.robot_rave.SetDOFValues(current_q, dof_indices) openrave.env.UpdatePublishedBodies() return angle_sum def getVisibility(openrave, vis_bodies, q=None, dof_indices=None): rays_hit = 0 m_id = 0 if q != None and dof_indices != None: current_q = openrave.robot_rave.GetDOFValues(dof_indices) openrave.robot_rave.SetDOFValues(q, dof_indices) openrave.env.UpdatePublishedBodies() for body in vis_bodies: openrave.env.Add( body ) T_W_C = self.OpenraveToKDL(openrave.robot_rave.GetLink("head_kinect_rgb_optical_frame").GetTransform()) T_C_W = T_W_C.Inverse() cam_W = T_W_C * PyKDL.Vector() cam_dir_W = PyKDL.Frame(T_W_C.M) * PyKDL.Vector(0,0,0.5) if debug: m_id = self.pub_marker.publishVectorMarker(cam_W, cam_W+cam_dir_W, m_id, 1, 1, 1, frame='world', namespace='kinect_head_rays', scale=0.01) # create rays connecting the optical frame and the target objects for (name, diam, pos_W) in vis_targets: pos_C = T_C_W * pos_W dir_W = pos_W - cam_W if pos_C.z() < 0.1: if debug: m_id = self.pub_marker.publishVectorMarker(cam_W, cam_W+dir_W, m_id, 0, 0, 1, frame='world', namespace='kinect_head_rays', scale=0.01) continue if velmautils.getAngle(PyKDL.Vector(0,0,1), pos_C) > kinect_fov: if debug: m_id = self.pub_marker.publishVectorMarker(cam_W, cam_W+dir_W, m_id, 0, 0, 1, frame='world', 
namespace='kinect_head_rays', scale=0.01) continue report = CollisionReport() ret = openrave.env.CheckCollision(Ray((cam_W[0], cam_W[1], cam_W[2]), (dir_W[0], dir_W[1], dir_W[2])), report) if ret and report.plink1 != None and report.plink1.GetParent().GetName().find("vis_target_") == 0: rays_hit += 1 if debug: m_id = self.pub_marker.publishVectorMarker(cam_W, cam_W+dir_W, m_id, 0, 1, 0, frame='world', namespace='kinect_head_rays', scale=0.01) else: if debug: m_id = self.pub_marker.publishVectorMarker(cam_W, cam_W+dir_W, m_id, 1, 0, 0, frame='world', namespace='kinect_head_rays', scale=0.01) for body in vis_bodies: openrave.env.Remove( body ) if q != None and dof_indices != None: openrave.robot_rave.SetDOFValues(current_q, dof_indices) openrave.env.UpdatePublishedBodies() return rays_hit # test visibility # if True: # print getVisibility(openrave) dof_names_torso = [ "head_pan_joint", "head_tilt_joint", "torso_0_joint", ] dof_names_left_arm = [ "left_arm_0_joint", "left_arm_1_joint", "left_arm_2_joint", "left_arm_3_joint", "left_arm_4_joint", "left_arm_5_joint", "left_arm_6_joint", ] dof_indices_torso = [] dof_limits_torso = [] for joint_name in dof_names_torso: joint = openrave.robot_rave.GetJoint(joint_name) dof_indices_torso.append( joint.GetDOFIndex() ) lim_lo, lim_up = joint.GetLimits() dof_limits_torso.append( (lim_lo[0], lim_up[0]) ) dof_indices_left_arm = [] dof_limits_left_arm = [] for joint_name in dof_names_left_arm: joint = openrave.robot_rave.GetJoint(joint_name) dof_indices_left_arm.append( joint.GetDOFIndex() ) lim_lo, lim_up = joint.GetLimits() dof_limits_left_arm.append( (lim_lo[0], lim_up[0]) ) # for planning torso movement disable arms links disabled_links_torso = [ "calib_left_arm_base_link", "calib_right_arm_base_link", "left_HandFingerOneKnuckleOneLink", "left_HandFingerOneKnuckleThreeLink", "left_HandFingerOneKnuckleTwoLink", "left_HandFingerThreeKnuckleThreeLink", "left_HandFingerThreeKnuckleTwoLink", "left_HandFingerTwoKnuckleOneLink", "left_HandFingerTwoKnuckleThreeLink", "left_HandFingerTwoKnuckleTwoLink", "left_HandPalmLink", "left_arm_2_link", "left_arm_3_link", "left_arm_4_link", "left_arm_5_link", "left_arm_6_link", "left_arm_7_link", "left_gripper_calib_link", "left_gripper_calib_link1", "left_gripper_calib_link2", "left_gripper_mount_link", "left_hand_camera_link", "left_hand_camera_optical_frame", "right_HandFingerOneKnuckleOneLink", "right_HandFingerOneKnuckleThreeLink", "right_HandFingerOneKnuckleTwoLink", "right_HandFingerThreeKnuckleThreeLink", "right_HandFingerThreeKnuckleTwoLink", "right_HandFingerTwoKnuckleOneLink", "right_HandFingerTwoKnuckleThreeLink", "right_HandFingerTwoKnuckleTwoLink", "right_HandPalmLink", "right_arm_2_link", "right_arm_3_link", "right_arm_4_link", "right_arm_5_link", "right_arm_6_link", "right_arm_7_link", "right_gripper_calib_link", "right_gripper_calib_link1", "right_gripper_calib_link2", "right_gripper_mount_link", "torso_base", "torso_link0", ] # for planning left arm movement disable right arm links disabled_links_left_arm = [ "calib_right_arm_base_link", "right_HandFingerOneKnuckleOneLink", "right_HandFingerOneKnuckleThreeLink", "right_HandFingerOneKnuckleTwoLink", "right_HandFingerThreeKnuckleThreeLink", "right_HandFingerThreeKnuckleTwoLink", "right_HandFingerTwoKnuckleOneLink", "right_HandFingerTwoKnuckleThreeLink", "right_HandFingerTwoKnuckleTwoLink", "right_HandPalmLink", "right_arm_2_link", "right_arm_3_link", "right_arm_4_link", "right_arm_5_link", "right_arm_6_link", "right_arm_7_link", "right_gripper_calib_link", 
"right_gripper_calib_link1", "right_gripper_calib_link2", "right_gripper_mount_link", ] # planning for torso # for link in openrave.robot_rave.GetLinks(): # if link.GetName() in disabled_links_torso: # link.Enable(False) # else: # link.Enable(True) # TODO # return dof_names = [ "head_pan_joint", "head_tilt_joint", "left_arm_0_joint", "left_arm_1_joint", # "left_arm_2_joint", # "left_arm_3_joint", # "left_arm_4_joint", # "left_arm_5_joint", # "left_arm_6_joint", "right_arm_0_joint", "right_arm_1_joint", # "right_arm_2_joint", # "right_arm_3_joint", # "right_arm_4_joint", # "right_arm_5_joint", # "right_arm_6_joint", "torso_0_joint", # "torso_1_joint", ] dof_indices = [] dof_limits = [] for joint_name in dof_names: joint = openrave.robot_rave.GetJoint(joint_name) dof_indices.append( joint.GetDOFIndex() ) lim_lo, lim_up = joint.GetLimits() dof_limits.append( (lim_lo[0], lim_up[0]) ) print "planning for %s joints"%(len(dof_indices)) self.collision_checks = 0 def isStateValid(state): self.collision_checks += 1 # return True q = [] for i in range(len(dof_indices)): q.append(state[i]) is_valid = True current_q = openrave.robot_rave.GetDOFValues(dof_indices) openrave.robot_rave.SetDOFValues(q, dof_indices) openrave.env.UpdatePublishedBodies() report1 = CollisionReport() report2 = CollisionReport() if openrave.robot_rave.CheckSelfCollision(report1) or openrave.env.CheckCollision(openrave.robot_rave, report2): is_valid = False openrave.robot_rave.SetDOFValues(current_q, dof_indices) openrave.env.UpdatePublishedBodies() return is_valid space = ob.RealVectorStateSpace() for i in range(len(dof_names)): space.addDimension(dof_names[i], dof_limits[i][0], dof_limits[i][1]) # without SimpleSetup si = ob.SpaceInformation(space); si.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid)) print "si.getStateValidityCheckingResolution", si.getStateValidityCheckingResolution() si.setStateValidityCheckingResolution(0.03) # class InformedValidStateSampler(ob.ValidStateSampler): # def __init__(self, si): # ob.ValidStateSampler.__init__(self, si) # self.name_ = "my sampler" # print "InformedValidStateSampler: init" # def sample(state): # print "sample" # return True # def sampleNear(state_out, state_in, distance): # print "sampleNear" # return True # def allocValidStateSampler(si): # return InformedValidStateSampler(si) si.setup() # si.setValidStateSamplerAllocator(ob.ValidStateSamplerAllocator(allocValidStateSampler)) # si.setValidStateSamplerAllocator(ob.ValidStateSamplerAllocator(allocOBValidStateSampler)) # create a simple setup object # ss = og.SimpleSetup(space) # ss.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid)) start = ob.State(space) # goal = ob.State(space) current_q = openrave.robot_rave.GetDOFValues(dof_indices) for i in range(len(current_q)): start[i] = current_q[i] # goal[i] = start[i] + 0.05 class VisGoal(ob.GoalRegion): def __init__(self, si): ob.GoalRegion.__init__(self, si) def distanceGoal(self, state): q = [] for i in range(len(dof_indices)): q.append(state[i]) rays_hit = getVisibility(openrave, vis_bodies, q=q, dof_indices=dof_indices) return len(vis_targets) - rays_hit class VisGoal2(ob.Goal): def __init__(self, si): ob.Goal.__init__(self, si) def isSatisfied(self, state): return True # def isSatisfied(state, distance): # print "bbb" # return True class VisOptimizationObjective(ob.OptimizationObjective): def __init__(self, si): ob.OptimizationObjective.__init__(self, si) self.si = si def stateCost(self, state): # q = [] # for i in range(len(dof_indices)): # 
q.append(state[i]) # cost = getSightAngle(openrave, q=q, dof_indices=dof_indices) # return ob.Cost(cost) return ob.Cost(0.0) cost = 0.0 for i in range(len(dof_indices)): diff = (state[i]-start[i]) cost += diff*diff return ob.Cost(math.sqrt(cost)) def motionCost(self, s1, s2): # q1 = [] # q2 = [] # for i in range(len(dof_indices)): # q1.append(s1[i]) # q2.append(s2[i]) # q_diff = np.array(q2) - np.array(q1) # cost = math.sqrt(np.dot(q_diff, q_diff)) + self.stateCost(s2) - self.stateCost(s1) # cost = math.sqrt(np.dot(q_diff, q_diff)) + self.stateCost(s2).value() - self.stateCost(s1).value() # cost = np.dot(q_diff, q_diff) cost1 = 0.0 for i in range(len(dof_indices)): diff = (s1[i]-start[i]) cost1 += diff*diff cost2 = 0.0 for i in range(len(dof_indices)): diff = (s2[i]-start[i]) cost2 += diff*diff cost = self.si.distance(s1,s2) * (math.sqrt(cost1) + math.sqrt(cost2))*0.5 # + (self.stateCost(s2).value() - self.stateCost(s1).value()) # cost = 0.0 # for i in range(len(dof_indices)): # cost += abs(s1[i] - s2[i]) return ob.Cost(cost) # without SimpleSetup pdef = ob.ProblemDefinition(si) pdef.clearStartStates() pdef.addStartState(start) goal = VisGoal(si) goal.setThreshold(1.0/len(vis_targets)) pdef.setGoal(goal) opt_obj = VisOptimizationObjective(si) pdef.setOptimizationObjective(opt_obj) # pdef.setStartAndGoalStates(start, goal) planner = og.RRTstar(si) print planner planner.setProblemDefinition(pdef) planner.setDelayCC(True) # planner.setPrune(True) # print "pruning:", planner.getPruneStatesImprovementThreshold() # planner.setPruneStatesImprovementThreshold(0.99) planner.setRange(360.0/180.0*math.pi) print planner planner.setup() print planner # ss.setStartAndGoalStates(start, goal) # this will automatically choose a default planner with # default parameters solved = planner.solve(30.0 * 1.0) print "collision_checks", self.collision_checks if solved: # try to shorten the path # ss.simplifySolution() print "getSolutionCount: ", pdef.getSolutionCount() # print the simplified path sol_path = pdef.getSolutionPath() print sol_path print "cost", sol_path.cost(opt_obj).value() sol_path.interpolate(100) traj = [] for state in sol_path.getStates(): traj.append([]) for i in range(len(dof_indices)): traj[-1].append(state[i]) final_state = sol_path.getStates()[-1] q = [] for i in range(len(dof_indices)): q.append(final_state[i]) rays_hit = getVisibility(openrave, vis_bodies, q=q, dof_indices=dof_indices) print "visibility:", rays_hit while True: raw_input(".") if rospy.is_shutdown(): break openrave.showTrajectory(dof_names, 5.0, traj) def spin(self): # # Initialise Openrave # # a1 = np.array([1,2,3]) # a2 = np.array([3,2,1]) # print a1*3 # exit(0) rospack = rospkg.RosPack() env_file=rospack.get_path('velma_scripts') + '/data/key/vis_test.env.xml' xacro_uri=rospack.get_path('velma_description') + '/robots/velma.urdf.xacro' srdf_path=rospack.get_path('velma_description') + '/robots/' openrave = openraveinstance.OpenraveInstance() openrave.startOpenraveURDF(env_file=env_file) # openrave.readRobot(xacro_uri=rospack.get_path('velma_description') + '/robots/velma.urdf.xacro', srdf_uri=rospack.get_path('velma_description') + '/robots/velma.srdf') openrave.readRobot(xacro_uri=xacro_uri, srdf_path=srdf_path) # for link in openrave.robot_rave.GetLinks(): # print link.GetName() # exit(0) openrave.setCamera(PyKDL.Vector(2.0, 0.0, 2.0), PyKDL.Vector(0.60, 0.0, 1.10)) print "creating interface for Velma..." # create the interface for Velma robot self.velma = Velma() print "done." 
rospy.sleep(0.5) self.velma.updateTransformations() # T_W_T = self.velma.T_W_E * PyKDL.Frame(PyKDL.Vector(0,0,0.17)) # print T_W_T.M.GetQuaternion() # print T_W_T.p # exit(0) openrave.updateRobotConfigurationRos(self.velma.js_pos) self.planVis(openrave) raw_input("Press ENTER to exit...") exit(0) rospy.sleep(1) openrave.runOctomap() sphere = RaveCreateKinBody(openrave.env,'') sphere.SetName("sphere") sphere.InitFromSpheres(numpy.array([[0,0,0,0.05]]),True) openrave.env.Add(sphere,True) # test the collision checker for octomap if True: raw_input("Press ENTER to continue...") ob = openrave.env.GetKinBody("_OCTOMAP_MAP_") cc = openrave.env.GetCollisionChecker() m_id = 0 for x in np.linspace(0,1.5,30): for y in np.linspace(-1,1,40): for z in np.linspace(1,2,20): # print x,y,z tr = self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(x,y,z))) sphere.SetTransform(tr) openrave.env.UpdatePublishedBodies() report = CollisionReport() ret = cc.CheckCollision(sphere, report) # ret = openrave.env.CheckCollision(ob, report) # print ret if ret: m_id = self.pub_marker.publishSinglePointMarker(PyKDL.Vector(x,y,z), m_id, r=1, g=0, b=0, a=1, namespace='default', frame_id='world', m_type=Marker.SPHERE, scale=Vector3(0.1, 0.1, 0.1), T=None) continue if report.plink1 == None: print None else: print report.plink1.GetParent().GetName(), report.plink2.GetName() # print " ", report.vLinkColliding for link1, link2 in report.vLinkColliding: print " ", link1.GetParent().GetName(), link2.GetName() # print report.plink1.GetParent().GetName(), report.plink2.GetParent().GetName() exit(0) self.pub_head_look_at = rospy.Publisher("/head_lookat_pose", geometry_msgs.msg.Pose) raw_input("Press ENTER to look around...") # self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,-0.5,1)))) # raw_input("Press ENTER to exit...") # exit(0) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(1,0,2)))) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,1,2)))) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,1,1.2)))) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(1,0,1.2)))) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,-1,1.2)))) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,-1,2)))) raw_input("Press ENTER to look around...") self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(1,0,2)))) raw_input(".") exit(0)