def get_handles(self):
    """Resolve the V-REP object handles this scene needs.

    Returns:
        dict with keys:
            'octree': handle of the 'Octree' scene object,
            'cam_rgb': list of RGB camera handles (one per name in the
                local camera-name list),
            'vision_sensor': handle of the 'Vision_sensor' object.
    """
    rgb_camera_names = ["Camera"]
    rgb_camera_handles = []
    for camera_name in rgb_camera_names:
        # simxGetObjectHandle returns (return_code, handle); keep the handle.
        handle = vrep.simxGetObjectHandle(
            self.client_id, camera_name, vrep.simx_opmode_blocking)[1]
        rgb_camera_handles.append(handle)

    return {
        'octree': vu.get_handle_by_name(self.client_id, 'Octree'),
        'cam_rgb': rgb_camera_handles,
        'vision_sensor': vu.get_handle_by_name(self.client_id, 'Vision_sensor'),
    }
def get_handles(self):
    """Resolve the V-REP object handles for this scene's objects.

    Returns:
        dict with keys:
            'anchor_obj': handle of the 'Cuboid0' object,
            'other_obj': handle of the 'Cuboid' object,
            'octree': handle of the 'Octree' object,
            'cam_rgb': list of RGB camera handles.
    """
    rgb_camera_names = ["Camera"]
    rgb_camera_handles = []
    for camera_name in rgb_camera_names:
        # simxGetObjectHandle returns (return_code, handle); keep the handle.
        handle = vrep.simxGetObjectHandle(
            self.client_id, camera_name, vrep.simx_opmode_blocking)[1]
        rgb_camera_handles.append(handle)

    return {
        'anchor_obj': vu.get_handle_by_name(self.client_id, 'Cuboid0'),
        'other_obj': vu.get_handle_by_name(self.client_id, 'Cuboid'),
        'octree': vu.get_handle_by_name(self.client_id, 'Octree'),
        'cam_rgb': rgb_camera_handles,
    }
def get_handles(self):
    """Resolve V-REP handles for the robot's joints and F/T sensor.

    Also issues one get_joint_position call per joint to start the
    simulator-side data stream so later reads return fresh values.

    Returns:
        dict with keys:
            'joint': list of six joint handles, ordered prismatic X/Y/Z
                then revolute Z/Y/X,
            'ft': list of force/torque sensor handles (currently just
                'FT_sensor_EE').
    """
    client_id = self.client_id

    def _handle_by_name(name):
        # Small helper to avoid repeating the client id on every lookup.
        return vu.get_handle_by_name(client_id, name)

    joint_names = [
        "Prismatic_joint_X",
        "Prismatic_joint_Y",
        "Prismatic_joint_Z",
        "Revolute_joint_Z",
        "Revolute_joint_Y",
        "Revolute_joint_X",
    ]
    joint_handles = [_handle_by_name(name) for name in joint_names]

    ft_names = ["FT_sensor_EE"]
    ft_handles = [_handle_by_name(name) for name in ft_names]

    robot_handles_dict = {
        "joint": joint_handles,
        "ft": ft_handles,
    }

    # Start data streaming: the first call registers the stream with the
    # remote API server; the returned values are intentionally discarded.
    for handle in joint_handles:
        vu.get_joint_position(client_id, handle)

    return robot_handles_dict
def get_joint_pose_vrep(clientID, joint_names=None):
    """Query V-REP for the pose of each named joint.

    Args:
        clientID: remote-API client id.
        joint_names: iterable of joint object names; falls back to the
            module-level JOINT_NAMES when empty or None.

    Returns:
        list of (origin, rotation) tuples, one per joint, where origin is
        the object position and rotation the object orientation as
        reported by V-REP.
    """
    names = joint_names if joint_names else JOINT_NAMES
    poses = []
    for name in names:
        handle = vu.get_handle_by_name(clientID, name)
        # Query VREP to get the parameters.
        poses.append((vu.get_object_position(clientID, handle),
                      vu.get_object_orientation(clientID, handle)))
    return poses
def get_cuboids(clientID, names, append=True):
    """Build Cuboid objects from V-REP scene objects.

    Args:
        clientID: remote-API client id.
        names: object base names to look up.
        append: when True, look up '<name>_collision_cuboid' in the scene;
            when False, look up the name as-is.

    Returns:
        list of Cuboid instances (origin, rotation, and absolute extents
        taken from the scene's bounding boxes), tagged with the base name.
    """
    cuboids = []
    for name in names:
        scene_name = name + '_collision_cuboid' if append else name
        handle = vu.get_handle_by_name(clientID, scene_name)

        # Query VREP to get the parameters.
        min_pos, max_pos = vu.get_object_bounding_box(clientID, handle)
        origin = vu.get_object_position(clientID, handle)
        rotation = vu.get_object_orientation(clientID, handle)

        # Cuboid extents: absolute side lengths from the bounding box.
        dxyz = np.abs(np.asarray(max_pos) - np.asarray(min_pos))

        cuboids.append(Cuboid(origin, rotation, dxyz, name, vrep=True))
    return cuboids
def get_handles(self):
    """Resolve V-REP handles for the robot's joints, F/T sensor and octree.

    Also issues one get_joint_position call per joint to start the
    simulator-side data stream so later reads return fresh values.

    Returns:
        dict with keys:
            'joint': list of six joint handles, ordered prismatic X/Y/Z
                then revolute Z/Y/X,
            'ft': list of force/torque sensor handles (currently just
                'FT_sensor_EE'),
            'octree': handle of the 'Octree' scene object.
    """
    client_id = self.client_id

    def _handle_by_name(name):
        # Small helper to avoid repeating the client id on every lookup.
        return vu.get_handle_by_name(client_id, name)

    joint_names = [
        "Prismatic_joint_X",
        "Prismatic_joint_Y",
        "Prismatic_joint_Z",
        "Revolute_joint_Z",
        "Revolute_joint_Y",
        "Revolute_joint_X",
    ]
    joint_handles = [_handle_by_name(name) for name in joint_names]

    ft_names = ["FT_sensor_EE"]
    ft_handles = [_handle_by_name(name) for name in ft_names]

    robot_handles_dict = dict(
        joint=joint_handles,
        ft=ft_handles,
        octree=_handle_by_name("Octree"),
    )

    # Start data streaming: the first call registers the stream with the
    # remote API server; the returned values are intentionally discarded.
    for handle in joint_handles:
        vu.get_joint_position(client_id, handle)

    return robot_handles_dict
res = vu.load_scene(client_id, '/home/mohit/projects/cooking/code/sim_vrep/scene_0.ttt') print(res) if res != vrep.simx_return_ok: print("Error in loading scene: {}".format(res)) vu.start_sim(client_id) cam_rgb_names = ["Camera"] cam_rgb_handles = [ vrep.simxGetObjectHandle(client_id, n, vrep.simx_opmode_blocking)[1] for n in cam_rgb_names ] print("Camera handle: {}".format(cam_rgb_handles)) octree_handle = vu.get_handle_by_name(client_id, 'Octree') print('Octree handle: {}'.format(octree_handle)) voxel_list = [] for t in range(100): vu.step_sim(client_id) res = \ vrep.simxCallScriptFunction( client_id, "Octree", vrep.sim_scripttype_childscript, "ms_GetOctreeVoxels", [int(octree_handle)], [], [], bytearray(),