def occluding(object, camera_position, world=None):
    """
    This reasoning query lists the objects which are occluding a given object.
    This works similar to 'visible'. First the object alone will be rendered and
    the positions of the object's pixels in the image will be saved. After that
    the complete scene will be rendered and the previously saved pixel positions
    will be compared to the actual pixels; if another object is visible at one of
    these pixels, it will be saved as occluding.

    :param object: The object for which occluding objects should be found
    :param camera_position: The position from which the camera looks at the object
    :param world: The BulletWorld if more than one BulletWorld is active
    :return: A list of occluding objects
    """
    world, world_id = _world_and_id(world)
    state = p.saveState()
    for obj in world.objects:
        if obj.id != object.id:
            # p.removeBody(object.id, physicsClientId=world_id)
            # Hot fix until I come up with something better
            p.resetBasePositionAndOrientation(obj.id, [100, 100, 100],
                                              [0, 0, 0, 1], world_id)

    seg_mask = _get_seg_mask_for_target(camera_position, object.get_position())
    pixels = []
    for i in range(0, 256):
        for j in range(0, 256):
            if seg_mask[i][j] == object.id:
                pixels.append((i, j))
    p.restoreState(state)

    occluding = []
    seg_mask = _get_seg_mask_for_target(camera_position, object.get_position())
    for c in pixels:
        if not seg_mask[c[0]][c[1]] == object.id:
            occluding.append(seg_mask[c[0]][c[1]])
    return list(set(map(lambda x: world.get_object_by_id(x), occluding)))
def setup(self, dims=None):
    # create constraint and a second cup
    self.cupStartPos = (0, -0.6, 0)
    self.cupStartOrientation = p.getQuaternionFromEuler([0, 0, 0])
    # pick random cup
    if dims is None:
        self.cup_name = np.random.choice(list(self.cup_to_dims.keys()))
    else:
        found_cup = False
        for name in self.cup_to_dims.keys():
            if self.cup_to_dims[name][0] == dims[0] and self.cup_to_dims[name][1] == dims[1]:
                found_cup = True
                self.cup_name = name
        assert found_cup
    cup_file = "urdf/cup/" + self.cup_name
    self.target_cup = p.loadURDF(cup_file,
                                 self.cupStartPos,
                                 self.cupStartOrientation,
                                 globalScaling=k * 5)
    self.base_world.drop_beads_in_cup()
    self.cid = p.createConstraint(self.base_world.cupID, -1, -1, -1,
                                  p.JOINT_FIXED, self.cupStartPos,
                                  self.cupStartOrientation, [0, 0, 1])
    self.bullet_id = p.saveState()
def place_object(self, visualize=False, shape_class=Rectangle, hole_goal=None, push_down=True):
    state = p.saveState()
    if hole_goal is None:
        hole_goal = self.hole_goal.copy()
        hole_goal[0][-1] = 0.03
    else:
        np.save("custom_hole_goal.npy", hole_goal)
        hole_goal[0][-1] += 0.002
    res = self.sample_trajs(hole_goal, shape_class=shape_class, placing=True)
    if res is None:
        return res
    traj, grasp = res
    traj = traj[0]  # only need to take the first
    grasp = grasp[0]
    if push_down:
        traj = self.get_push_down_traj(traj, shape_class=shape_class)  # end moving in that last bit
    if visualize:
        self.visualize_traj(traj)
    # n_pts = min(20, len(traj))
    n_pts = min(7, len(traj))
    traj = self.apply_mask(traj, n_pts=n_pts)
    assert traj is not None
    p.restoreState(state)
    return traj
def reset_agent(self, env):
    """
    Reset robot initial pose.
    Sample initial pose, check validity, and land it.

    :param env: environment instance
    """
    reset_success = False
    max_trials = 100

    # cache pybullet state
    # TODO: p.saveState takes a few seconds, need to speed up
    state_id = p.saveState()
    for _ in range(max_trials):
        initial_pos, initial_orn = self.sample_initial_pose(env)
        reset_success = env.test_valid_position(env.robots[0], initial_pos, initial_orn)
        p.restoreState(state_id)
        if reset_success:
            break

    if not reset_success:
        logging.warning("WARNING: Failed to reset robot without collision")

    env.land(env.robots[0], initial_pos, initial_orn)
    p.removeState(state_id)

    for reward_function in self.reward_functions:
        reward_function.reset(self, env)
def reset_interactive_objects(self, env):
    """
    Reset the poses of interactive objects to have no collisions with the scene or the robot

    :param env: environment instance
    """
    max_trials = 100
    for obj in self.interactive_objects:
        # TODO: p.saveState takes a few seconds, need to speed up
        state_id = p.saveState()
        for _ in range(max_trials):
            _, pos = env.scene.get_random_point(floor=self.floor_num)
            orn = np.array([0, 0, np.random.uniform(0, np.pi * 2)])
            reset_success = env.test_valid_position(obj, pos, orn)
            p.restoreState(state_id)
            if reset_success:
                break

        if not reset_success:
            print("WARNING: Failed to reset interactive obj without collision")

        env.land(obj, pos, orn)
def blocking(object, robot, gripper_name, world=None):
    """
    This reasoning query checks if any objects are blocking another object when a
    robot tries to pick it up. This works similar to the reachable predicate.
    First the inverse kinematics between the robot and the object will be
    calculated and applied. Then it will be checked if the robot is in contact
    with any object except the given one.

    :param object: The object for which blocking objects should be found
    :param robot: The robot who reaches for the object
    :param gripper_name: The name of the end effector of the robot
    :param world: The BulletWorld if more than one BulletWorld is active
    :return: A list of objects the robot is in collision with when reaching for the specified object
    """
    world, world_id = _world_and_id(world)
    state = p.saveState(physicsClientId=world_id)

    arm = "left" if gripper_name == robot_description.i.get_tool_frame("left") else "right"
    joints = robot_description.i._safely_access_chains(arm).joints
    target = _transform_to_torso(object.get_position_and_orientation(), robot)
    inv = request_ik(robot_description.i.base_frame, gripper_name, target, robot, joints)
    # Hack because kdl outputs more joint values than there are joints given
    joints = [robot_description.i.torso_joint] + joints
    for i in range(len(inv)):
        robot.set_joint_state(joints[i], inv[i])

    block = []
    for obj in world.objects:
        if obj == object:
            continue
        if contact(robot, obj, world):
            block.append(obj)
    p.restoreState(state, physicsClientId=world_id)
    return block
def reset_agent(self, env):
    """
    Reset robot initial pose.
    Sample initial pose and target position, check validity, and land it.

    :param env: environment instance
    """
    reset_success = False
    max_trials = 100

    # cache pybullet state
    # TODO: p.saveState takes a few seconds, need to speed up
    state_id = p.saveState()
    for i in range(max_trials):
        initial_pos, initial_orn, target_pos = \
            self.sample_initial_pose_and_target_pos(env)
        reset_success = env.test_valid_position(
            env.robots[0], initial_pos, initial_orn) and \
            env.test_valid_position(
                env.robots[0], target_pos)
        p.restoreState(state_id)
        if reset_success:
            break

    if not reset_success:
        logging.warning("WARNING: Failed to reset robot without collision")

    p.removeState(state_id)

    self.target_pos = target_pos
    self.initial_pos = initial_pos

    super(PointNavRandomTask, self).reset_agent(env)
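The reset functions above all share one pattern: cache the simulator state once, try up to max_trials random poses, restore the cached state after every trial so the checks never leak into the episode, and finally free the cached snapshot with p.removeState. The following is a minimal self-contained sketch of that pattern in plain pybullet; the URDFs and the contact-based validity check are illustrative stand-ins for env.test_valid_position, not part of the code above.

import numpy as np
import pybullet as p
import pybullet_data

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")
robot = p.loadURDF("r2d2.urdf", [0, 0, 0.5])
obstacle = p.loadURDF("cube.urdf", [1, 0, 0.5])

state_id = p.saveState()                   # cache the scene once
valid_pos = None
for _ in range(100):                       # max_trials
    candidate = [np.random.uniform(-2, 2), np.random.uniform(-2, 2), 0.5]
    p.resetBasePositionAndOrientation(robot, candidate, [0, 0, 0, 1])
    p.stepSimulation()                     # lets contacts be computed
    in_collision = len(p.getContactPoints(bodyA=robot, bodyB=obstacle)) > 0
    p.restoreState(state_id)               # undo the trial, valid or not
    if not in_collision:
        valid_pos = candidate
        break
p.removeState(state_id)                    # free the cached snapshot
if valid_pos is not None:
    p.resetBasePositionAndOrientation(robot, valid_pos, [0, 0, 0, 1])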
def sample_trajs(self, goal):
    ee_goals = []
    grasps = []
    state = p.saveState()
    sample_joint = 6
    original = ut.get_joint_position(self.robot, sample_joint)
    grasp_symmetries = [-np.pi / 2, 0, np.pi / 2]  # fix eventually: shape_class.grasp_symmetries()
    self.grasp_offset = 0.010 + self.franka_tool_to_pb_link
    for sym in grasp_symmetries:
        if original + sym < 3 and original + sym > -3:
            ut.set_joint_position(self.robot, sample_joint, original + sym)
            curr_pos = ut.get_joint_position(self.robot, sample_joint)
            ee_goal, grasp = self.get_closest_ee_goals(
                goal, shape_class=Rectangle, grasp_offset=self.grasp_offset)
            ee_goals.append(ee_goal)
            grasps.append(grasp)
    p.restoreState(state)
    # grasp = grasp_from_ee_and_obj(ee_goal, shape_goal)

    working_grasp = None
    working_traj = None
    for ee_goal, grasp in zip(ee_goals, grasps):
        grasp_traj = self.make_traj(ee_goal)
        if grasp_traj is not None:
            working_grasp = grasp
            working_traj = grasp_traj
    assert working_traj is not None
    return working_traj, working_grasp
def reachable_pose(pose, robot, gripper_name, world=None, threshold=0.01):
    """
    This reasoning query checks if the robot can reach a given position. To
    determine this the inverse kinematics are calculated and applied. Afterwards
    the distance between the position and the given end effector is calculated;
    if it is smaller than the threshold the reasoning query returns True,
    otherwise it returns False.

    :param pose: The position for which reachability should be checked
    :param robot: The robot that should reach for the position
    :param gripper_name: The name of the end effector
    :param world: The BulletWorld in which the reasoning query should operate
    :param threshold: The threshold between the end effector and the position.
    :return: True if the end effector is closer than the threshold to the target
             position, False in every other case
    """
    world, world_id = _world_and_id(world)
    state = p.saveState(physicsClientId=world_id)

    arm = "left" if gripper_name == robot_description.i.get_tool_frame("left") else "right"
    joints = robot_description.i._safely_access_chains(arm).joints
    target = _transform_to_torso([pose, [0, 0, 0, 1]], robot)
    target = (target[0], [0, 0, 0, 1])
    inv = request_ik(robot_description.i.base_frame, gripper_name, target, robot, joints)
    # Hack because kdl outputs more joint values than there are joints given
    joints = [robot_description.i.torso_joint] + joints
    for i in range(len(inv)):
        robot.set_joint_state(joints[i], inv[i])

    newp = p.getLinkState(robot.id, robot.get_link_id(gripper_name),
                          physicsClientId=world_id)[4]
    diff = [pose[0] - newp[0], pose[1] - newp[1], pose[2] - newp[2]]
    p.restoreState(state, physicsClientId=world_id)
    return np.sqrt(diff[0]**2 + diff[1]**2 + diff[2]**2) < threshold
def reachable_pose(pose, robot, gripper_name, world=None, threshold=0.01):
    """
    This reasoning query checks if the robot can reach a given position. To
    determine this the inverse kinematics are calculated and applied. Afterwards
    the distance between the position and the given end effector is calculated;
    if it is smaller than the threshold the reasoning query returns True,
    otherwise it returns False.

    :param pose: The position for which reachability should be checked
    :param robot: The robot that should reach for the position
    :param gripper_name: The name of the end effector
    :param world: The BulletWorld in which the reasoning query should operate
    :param threshold: The threshold between the end effector and the position.
    :return: True if the end effector is closer than the threshold to the target
             position, False in every other case
    """
    world, world_id = _world_and_id(world)
    state = p.saveState(physicsClientId=world_id)
    inv = p.calculateInverseKinematics(robot.id,
                                       robot.get_link_id(gripper_name),
                                       pose,
                                       maxNumIterations=100,
                                       physicsClientId=world_id)
    for i in range(p.getNumJoints(robot.id, world_id)):
        qIndex = p.getJointInfo(robot.id, i, world_id)[3]
        if qIndex > -1:
            p.resetJointState(robot.id, i, inv[qIndex - 7], physicsClientId=world_id)

    newp = p.getLinkState(robot.id, robot.get_link_id(gripper_name),
                          physicsClientId=world_id)[4]
    diff = [pose[0] - newp[0], pose[1] - newp[1], pose[2] - newp[2]]
    p.restoreState(state, physicsClientId=world_id)
    return np.sqrt(diff[0]**2 + diff[1]**2 + diff[2]**2) < threshold
def _reset(self):
    if (self.stateId >= 0):
        p.restoreState(self.stateId)
    r = BaseBulletEnv._reset(self)
    if (self.stateId < 0):
        self.stateId = p.saveState()
    return r
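This _reset (and the nearly identical ones further below) caches the post-setup state the first time the environment is reset and simply restores it on every later reset, which is much cheaper than rebuilding the scene. A standalone sketch of the same idea without the BaseBulletEnv machinery might look like the following; the class name and URDFs are placeholders chosen for illustration.

import pybullet as p
import pybullet_data

class CachedResetEnv:
    def __init__(self):
        p.connect(p.DIRECT)
        p.setAdditionalSearchPath(pybullet_data.getDataPath())
        self.state_id = -1                        # no cached state yet

    def reset(self):
        if self.state_id >= 0:
            p.restoreState(self.state_id)         # cheap path: reuse the cached state
            return
        # expensive path: build the scene once, then cache it
        p.resetSimulation()
        p.setGravity(0, 0, -9.8)
        p.loadURDF("plane.urdf")
        self.robot = p.loadURDF("r2d2.urdf", [0, 0, 0.5])
        self.state_id = p.saveState()

env = CachedResetEnv()
env.reset()                                       # builds and caches
for _ in range(240):
    p.stepSimulation()
env.reset()                                       # restores the cached state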
def blocking(object, robot, gripper_name, world=None):
    """
    This reasoning query checks if any objects are blocking another object when a
    robot tries to pick it up. This works similar to the reachable predicate.
    First the inverse kinematics between the robot and the object will be
    calculated and applied. Then it will be checked if the robot is in contact
    with any object except the given one.

    :param object: The object for which blocking objects should be found
    :param robot: The robot who reaches for the object
    :param gripper_name: The name of the end effector of the robot
    :param world: The BulletWorld if more than one BulletWorld is active
    :return: A list of objects the robot is in collision with when reaching for the specified object
    """
    world, world_id = _world_and_id(world)
    state = p.saveState(physicsClientId=world_id)
    inv = p.calculateInverseKinematics(robot.id,
                                       robot.get_link_id(gripper_name),
                                       object.get_pose(),
                                       maxNumIterations=100,
                                       physicsClientId=world_id)
    for i in range(0, p.getNumJoints(robot.id, world_id)):
        qIndex = p.getJointInfo(robot.id, i, world_id)[3]
        if qIndex > -1:
            p.resetJointState(robot.id, i, inv[qIndex - 7], physicsClientId=world_id)

    block = []
    for obj in world.objects:
        if obj == object:
            continue
        if contact(robot, obj, world):
            block.append(obj)
    p.restoreState(state, physicsClientId=world_id)
    return block
def visible(object, camera_position, world=None):
    """
    This reasoning query checks if an object is visible from a given position.
    This will be achieved by rendering the object alone and counting the visible
    pixels, then rendering the complete scene and comparing the visible pixels
    with the absolute count of pixels.

    :param object: The object for which the visibility should be checked
    :param camera_position: The position from which the camera looks at the object
    :param world: The BulletWorld if more than one BulletWorld is active
    :return: True if the object is visible from the camera_position, False if not
    """
    world, world_id = _world_and_id(world)
    state = p.saveState()
    for obj in world.objects:
        if obj.id != object.id:
            # p.removeBody(object.id, physicsClientId=world_id)
            # Hot fix until I come up with something better
            p.resetBasePositionAndOrientation(obj.id, [100, 100, 100],
                                              [0, 0, 0, 1], world_id)

    seg_mask = _get_seg_mask_for_target(camera_position, object.get_position())
    flat_list = list(itertools.chain.from_iterable(seg_mask))
    max_pixel = sum(list(map(lambda x: 1 if x == object.id else 0, flat_list)))
    p.restoreState(state)

    seg_mask = _get_seg_mask_for_target(camera_position, object.get_position())
    flat_list = list(itertools.chain.from_iterable(seg_mask))
    real_pixel = sum(list(map(lambda x: 1 if x == object.id else 0, flat_list)))

    if max_pixel == 0:
        # Object is not visible
        return False

    return real_pixel / max_pixel > 0.8
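Both visible and occluding compare segmentation masks rendered with only the target present versus the full scene. The helper _get_seg_mask_for_target is not shown here; with plain pybullet the same per-object pixel count can be read from the segmentation buffer returned by getCameraImage, roughly as in this sketch (camera placement and the single duck object are illustrative choices, not taken from the code above).

import numpy as np
import pybullet as p
import pybullet_data

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")
duck = p.loadURDF("duck_vhacd.urdf", [0, 0, 0.1])

view = p.computeViewMatrix(cameraEyePosition=[1, 1, 1],
                           cameraTargetPosition=[0, 0, 0.1],
                           cameraUpVector=[0, 0, 1])
proj = p.computeProjectionMatrixFOV(fov=60, aspect=1.0, nearVal=0.01, farVal=10)
width, height, _, _, seg = p.getCameraImage(256, 256, viewMatrix=view,
                                            projectionMatrix=proj)

# Count the pixels whose segmentation value equals the duck's body unique id.
seg = np.reshape(seg, (height, width))
duck_pixels = int(np.count_nonzero(seg == duck))
print("duck occupies", duck_pixels, "of", width * height, "pixels")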
def setup(self, bin_configuration_yaml, starting_state_fname=None,
          starting_robot_pose=None, starting_time=0.0, auto_enable_timer=0.0):
    """Sets up the game elements.

    :param bin_configuration_yaml: the yaml file to read for bin setup.
    :param starting_state_fname: the starting state file of the simulation.
    :param starting_robot_pose: the starting pose of the mobile robot.
    :param starting_time: the starting time of the simulation.
    :param auto_enable_timer: timer to auto-enable the robot, 0 disables it.
    """
    self.load_environment(bin_configuration_yaml)
    self.load_agents(initial_mobile_pose=starting_robot_pose)
    if not self.hide_ui:
        self.load_ui()
    if starting_state_fname:
        p.restoreState(fileName=starting_state_fname)
    self.starting_state = p.saveState()

    self.initial_auto_enable_timer = auto_enable_timer
    self.auto_enable_timer = self.initial_auto_enable_timer
    self.auto_enabled = False

    self.starting_time = starting_time
    self.time = self.starting_time
    if self.use_interactive and self.is_interactive_realtime:
        self.prev = time.time()
    self.info_id = Utilities.draw_debug_info(self.time)
def grasp_object(self, shape_class=Rectangle, visualize=False):
    shape_goal = p.getBasePositionAndOrientation(
        self.shape_name_to_shape[shape_class.__name__])
    trajs, grasps = self.sample_trajs(shape_goal, shape_class=shape_class)
    state = p.saveState()
    for traj, grasp in zip(trajs, grasps):
        # make sure you can insert it as well
        self.set_joints(traj[-1])
        self.grasp = grasp
        res = self.sample_trajs(self.hole_goal, shape_class=shape_class, placing=True)
        if res is None or len(res) == 0:
            continue
        print("Found path that can be inserted as well")
        insert_traj = res[0][0]
        insert_traj = self.get_push_down_traj(insert_traj, shape_class=shape_class)
        n_pts = min(8, len(insert_traj))
        print("n pts for grasp", n_pts)
        insert_traj = self.apply_mask(insert_traj, n_pts=n_pts)
        break
    p.restoreState(state)
    traj = self.apply_mask(traj, n_pts=5)
    if visualize:
        self.visualize_traj(traj)
    self.attach_shape(shape_class, grasp)
    assert traj is not None
    return traj, insert_traj
def __init__(self, renders=False, realWorld=0):
    self.renders = renders
    if renders:
        pybullet.connect(pybullet.GUI)
    else:
        pybullet.connect(pybullet.DIRECT)

    action_high = np.array([1, 1, 1, 1, 1, 1, 1, 1])
    self.action_space = spaces.Box(low=-action_high, high=action_high)
    self.observation_space = spaces.Box(low=-2, high=2, shape=(12, ))
    self.seed(int(time.time()))

    self.dog = pybullet.loadURDF(currentdir + "/urdf/blackbird.urdf", [0, 0, 2],
                                 flags=pybullet.URDF_USE_SELF_COLLISION)
    pybullet.resetBasePositionAndOrientation(self.dog, [0, 0, 1.2],
                                             [0, 0, 0.707, 0.707])
    self.plane = pybullet.loadURDF(
        os.path.join(pybullet_data.getDataPath(), "plane.urdf"), 0, 0, 0)
    pybullet.changeDynamics(self.plane, -1, lateralFriction=25)

    self.numJoints = pybullet.getNumJoints(self.dog)
    self.timeStep = 1.0 / 240
    self.currentSimTime = 0.0
    self.timesRun = 0

    pybullet.setGravity(0, 0, -9.8)
    pybullet.setRealTimeSimulation(realWorld)
    self.resetState = pybullet.saveState()
    self.reset()
def __init__(self):
    # Two environments? One for graphics, one w/o graphics?
    self.physicsClient = p.connect(p.GUI)  # or p.DIRECT for non-graphical version
    self.jointIds = []
    self.paramIds = []

    p.setAdditionalSearchPath(pybullet_data.getDataPath())  # optionally
    p.setGravity(0, 0, -10)
    p.loadURDF("plane.urdf")
    self.humanoid = p.loadURDF(absolute_path_urdf, [0, 0, 0.8], useFixedBase=False)
    self.cubeStartPos = [0, 0, 1]
    self.cubeStartOrientation = p.getQuaternionFromEuler([0, 0, 0])
    print("The number of joints on the Cassie robot is " +
          str(p.getNumJoints(self.humanoid)))

    # Setup the debugParam sliders
    # Why -10, 10, -10
    self.gravId = p.addUserDebugParameter("gravity", -10, 10, -10)
    self.homePositionAngles = [
        0, 0, 1.0204, -1.97, -0.084, 2.06, -1.9, 0, 0, 1.0204, -1.97, -0.084,
        2.06, -1.9, 0
    ]

    activeJoint = 0
    for j in range(p.getNumJoints(self.humanoid)):
        # Why set the damping factors to 0?
        p.changeDynamics(self.humanoid, j, linearDamping=0, angularDamping=0)
        info = p.getJointInfo(self.humanoid, j)
        jointName = info[1]
        jointType = info[2]
        if jointType == p.JOINT_PRISMATIC or jointType == p.JOINT_REVOLUTE:
            self.jointIds.append(j)
            self.paramIds.append(
                p.addUserDebugParameter(jointName.decode("utf-8"), -4, 4,
                                        self.homePositionAngles[activeJoint]))
            p.resetJointState(self.humanoid, j, self.homePositionAngles[activeJoint])
            activeJoint = activeJoint + 1

    # What exactly does this do?
    p.setRealTimeSimulation(1)
    self.stateId = p.saveState()  # Stores state in memory rather than on disk
def save_state(self):
    """
    Returns the id of the saved state of the BulletWorld
    """
    objects2attached = {}
    for o in self.objects:
        objects2attached[o] = (o.attachments.copy(), o.cids.copy())
    return p.saveState(self.client_id), objects2attached
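save_state above pairs the pybullet state id with a copy of each object's attachment bookkeeping, because p.restoreState only rolls back the physics state, not Python-side data. A matching restore could look roughly like the sketch below; the objects, attachments and cids attributes mirror the ones used above, and the method name is illustrative rather than the library's actual API.

def restore_state(self, state_id, objects2attached):
    # Roll back the physics state first ...
    p.restoreState(state_id, physicsClientId=self.client_id)
    # ... then reinstate the Python-side attachment bookkeeping that
    # p.restoreState knows nothing about.
    for o in self.objects:
        attachments, cids = objects2attached.get(o, ({}, {}))
        o.attachments = attachments.copy()
        o.cids = cids.copy()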
def save_state(self):
    """
    Save the environment state in memory via pybullet.

    Args:
        None
    Returns:
        state_id (int): in-memory identifier of the state in pybullet.
    """
    state_id = pb.saveState(self.world.physics_id)
    return state_id
def main(viewer=False, display=True, simulate=False, teleport=False):
    # TODO: fix argparse & FastDownward
    #parser = argparse.ArgumentParser()  # Automatically includes help
    #parser.add_argument('-viewer', action='store_true', help='enable viewer.')
    #parser.add_argument('-display', action='store_true', help='enable viewer.')
    #args = parser.parse_args()

    connect(use_gui=viewer)
    problem_fn = holding_problem  # holding_problem | stacking_problem | cleaning_problem | cooking_problem
    # cleaning_button_problem | cooking_button_problem
    problem = problem_fn()
    state_id = p.saveState()
    #saved_world = WorldSaver()
    dump_world()

    pddlstream_problem = pddlstream_from_problem(problem, teleport=teleport)
    _, _, _, stream_map, init, goal = pddlstream_problem
    synthesizers = [
        #StreamSynthesizer('safe-base-motion', {'plan-base-motion': 1,
        #                                       'TrajPoseCollision': 0, 'TrajGraspCollision': 0, 'TrajArmCollision': 0},
        #                  from_fn(get_base_motion_synth(problem, teleport))),
    ]
    print('Init:', init)
    print('Goal:', goal)
    print('Streams:', stream_map.keys())
    print('Synthesizers:', synthesizers)

    pr = cProfile.Profile()
    pr.enable()
    solution = solve_focused(pddlstream_problem, synthesizers=synthesizers, max_cost=INF)
    print_solution(solution)
    plan, cost, evaluations = solution
    pr.disable()
    pstats.Stats(pr).sort_stats('tottime').print_stats(10)
    if plan is None:
        return

    if (not display) or (plan is None):
        p.disconnect()
        return

    if viewer:
        p.restoreState(state_id)
    else:
        disconnect()
        connect(use_gui=True)
        problem = problem_fn()  # TODO: way of doing this without reloading?

    user_input('Execute?')
    commands = post_process(problem, plan)
    if simulate:
        enable_gravity()
        control_commands(commands)
    else:
        step_commands(commands, time_step=0.01)
    user_input('Finish?')
    disconnect()
def _reset(self):
    if (self.stateId >= 0):
        #print("InvertedPendulumBulletEnv reset p.restoreState(", self.stateId, ")")
        p.restoreState(self.stateId)
    r = BaseBulletEnv._reset(self)
    if (self.stateId < 0):
        self.stateId = p.saveState()
        #print("InvertedPendulumBulletEnv reset self.stateId=", self.stateId)
    return r
def reset(self):
    if self.initialSetup is False:
        self.sid = self.setupWorld(self.render)
        self.initialSetup = True
        self._load_objects()
        self.initialState = pb.saveState()
    else:
        pb.restoreState(self.initialState)
    return self._reset()
def __init__(self, display=False, obj='cube', random_obj=False,
             pos_cam=[1.3, 180, -40], gripper_display=False, steps_to_roll=1,
             random_var=0.01, delta_pos=[0, 0]):
    self.obj = obj
    self.display = display
    self.random_obj = random_obj
    self.pos_cam = pos_cam
    self.gripper_display = gripper_display
    self.steps_to_roll = steps_to_roll
    self.random_var = random_var
    self.delta_pos = delta_pos

    if self.display:
        self.physics_client = p.connect(p.GUI)
    else:
        self.physics_client = p.connect(p.DIRECT)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())

    # setup the world
    self.setup_world()

    # save the state
    self.save_state = p.saveState()

    # configure display if necessary
    if self.display:
        p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
        p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
        p.resetDebugVisualizerCamera(self.pos_cam[0], self.pos_cam[1],
                                     self.pos_cam[2], [0, 0, 0])
        p.getCameraImage(320, 200, renderer=p.ER_BULLET_HARDWARE_OPENGL)
        if self.gripper_display:
            self.line_width = 4
            self.line_1 = p.addUserDebugLine([0, 0, 0], [0.2, 0, 0], [255, 0, 0],
                                             lineWidth=self.line_width,
                                             parentObjectUniqueId=self.robot_id,
                                             parentLinkIndex=self.end_effector_id)
            self.line_2 = p.addUserDebugLine([0, 0, 0], [0, 0.2, 0], [0, 255, 0],
                                             lineWidth=self.line_width,
                                             parentObjectUniqueId=self.robot_id,
                                             parentLinkIndex=self.end_effector_id)
            self.line_3 = p.addUserDebugLine([0, 0, 0], [0, 0, 0.2], [0, 0, 255],
                                             lineWidth=self.line_width,
                                             parentObjectUniqueId=self.robot_id,
                                             parentLinkIndex=self.end_effector_id)
        sleep(1.)
def __init__(self, cap=cv2.VideoCapture(0), mod="Direct", epsilon=150,
             preprocess=None, resolution=(56, 56, 3)):
    self.cap = cap
    if mod == "GUI":
        # GUI is just to see the hand through the OpenGL window; set the camera however you want to view the hand
        self.clientId = p.connect(p.GUI)
    else:
        self.clientId = p.connect(p.DIRECT)
    # p.setRealTimeSimulation(1, physicsClientId=self.clientId)
    p.resetDebugVisualizerCamera(cameraDistance=2, cameraYaw=0, cameraPitch=-40,
                                 cameraTargetPosition=[0, 0, 2],
                                 physicsClientId=self.clientId)

    # down and up (thumb, index, middle, ring, little), wrist, elbow
    self.action_space = spaces.Box(low=np.array([0] * 10 + [-0.52, -1.04]),
                                   high=np.array([1.55] * 10 + [0.52, 1.04]))
    if len(resolution) != 3:
        raise Exception("Only an image with ndim n=3 can be given as input")
    self.res = resolution
    self.observation_space = spaces.Box(0, 2.55,
                                        shape=(self.res[0], self.res[1] * 2, self.res[2]))

    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    self.plane = p.loadURDF("plane.urdf", physicsClientId=self.clientId)
    p.setAdditionalSearchPath(os.path.abspath("Simulation"))
    self.handid = p.loadURDF(currentdir + "/hand.urdf", physicsClientId=self.clientId)
    for i in range(p.getNumJoints(self.handid, physicsClientId=self.clientId)):
        print(p.getJointInfo(bodyUniqueId=self.handid, jointIndex=i,
                             physicsClientId=self.clientId))

    if preprocess is None:
        self.hand_thresh = self.handmask
    else:
        self.hand_thresh = preprocess
    self.epsilon = epsilon  # Find a good one and set as default
    self.seed(int(time.time()))  # This is to match up the number of pixels of our PHATTTT

    p.createConstraint(self.handid, -1, -1, -1, p.JOINT_FIXED, (0, 0, 1),
                       (0, 0, 0), (0, 0, 0), physicsClientId=self.clientId)
    finger_joint_indices = (
        (2, 3),    # thumb
        (4, 5),    # index
        (6, 7),    # middle
        (8, 9),    # ring
        (10, 11),  # little
    )
    self.hand = robo_hand(self.handid, finger_joint_indices, clientId=self.clientId)
    self.resetState = p.saveState()
    self.reset()
def sample_trajs(self, goal, shape_class=Rectangle, placing=False):
    state = p.saveState()
    sample_joint = 6
    original = ut.get_joint_position(self.robot, sample_joint)
    if placing:
        shape_frame_symmetries = shape_class.placement_symmetries()
        # shift the symmetries by the grasp, since those are the actual angles we
        # are sampling, not the robot joint angles.
        symmetries = []
        for sampled_angle in shape_frame_symmetries:
            grasp_quat = self.grasp[1]
            grasp_yaw = ut.euler_from_quat(grasp_quat)[-1]
            # symmetries.append(sampled_angle + grasp_yaw)
            symmetries.append(sampled_angle)
    else:
        symmetries = shape_class.grasp_symmetries()
    self.grasp_offset = 0.003 + self.franka_tool_to_pb_link  # originally 0.005

    # All of this is to make sure the grasps are within joint limits
    ee_goals = []
    grasps = []
    joint_angles = []
    for sym in symmetries:
        if original + sym < 3 and original + sym > -3:
            ut.set_joint_position(self.robot, sample_joint, original + sym)
            curr_pos = ut.get_joint_position(self.robot, sample_joint)
            ee_goal, grasp = self.get_closest_ee_goals(
                goal, shape_class=shape_class, grasp_offset=self.grasp_offset)
            ee_goals.append(ee_goal)
            grasps.append(grasp)
            joint_angles.append(original + sym)
    p.restoreState(state)
    # grasp = grasp_from_ee_and_obj(ee_goal, shape_goal)

    working_grasp = []
    working_traj = []
    working_joint_angles = []
    for ee_goal, grasp, joint_angle in zip(ee_goals, grasps, joint_angles):
        grasp_traj = self.make_traj(ee_goal, shape_class=shape_class)
        if grasp_traj is not None:
            working_grasp.append(grasp)
            working_traj.append(grasp_traj)
            working_joint_angles.append(joint_angle)
    if len(working_traj) == 0:
        print("Could not find working traj. c to continue")
        return None
    # assert(len(working_traj) > 0)
    # import ipdb; ipdb.set_trace()
    distances = [(working_joint_angle - original)**2
                 for working_joint_angle in working_joint_angles]
    min_joint_angle_idxs = np.argsort(distances)
    return np.array(working_traj)[min_joint_angle_idxs], np.array(working_grasp)[min_joint_angle_idxs]
def setupSim(self):
    if self.SIM_READDY == 1:
        raise Exception("Simulation already set up")

    if self.GUI_ENABLED == 1:
        self.client_id = self.physicsClient = p.connect(p.GUI)
        p.resetDebugVisualizerCamera(cameraDistance=0.8,
                                     cameraYaw=90,
                                     cameraPitch=-10,
                                     cameraTargetPosition=[0, 0, 0],
                                     physicsClientId=self.client_id)
        if self.LOGDATA == 1:
            self.state_seq = []
            self.state_dot_seq = []
            self.actions_seq = []
            print("Starting video")
            p.startStateLogging(
                p.STATE_LOGGING_VIDEO_MP4,
                self.video_path + "/Pendulum_training{}.mp4".format(self.sim_number),
                physicsClientId=self.client_id)
    else:
        self.client_id = self.physicsClient = p.connect(p.DIRECT)

    p.setAdditionalSearchPath(self.urdf_path)
    # setGravity must be called, not assigned to
    p.setGravity(self.gravity[0], self.gravity[1], self.gravity[2],
                 physicsClientId=self.client_id)
    self.SIM_READDY = 1
    p.setTimeStep(self.time_step, self.client_id)
    self.loadRobot()

    self.pos_limits_high = [np.pi for i in range(p.getNumJoints(self.body_id))]  # rad
    self.pos_limits_low = [-np.pi for i in range(p.getNumJoints(self.body_id))]
    self.vel_limits_high = [0.7 * np.pi for i in range(p.getNumJoints(self.body_id))]
    self.vel_limits_low = [-0.7 * np.pi for i in range(p.getNumJoints(self.body_id))]

    p.stepSimulation(physicsClientId=self.client_id)
    self.initial_frame = p.saveState(physicsClientId=self.client_id)
    self.observeState()
    print("Simulation is ready")
def occluding(object, camera_position_and_orientation, front_facing_axis, world=None):
    """
    This reasoning query lists the objects which are occluding a given object.
    This works similar to 'visible'. First the object alone will be rendered and
    the positions of the object's pixels in the image will be saved. After that
    the complete scene will be rendered and the previously saved pixel positions
    will be compared to the actual pixels; if another object is visible at one of
    these pixels, it will be saved as occluding.

    :param object: The object for which occluding objects should be found
    :param camera_position_and_orientation: The position and orientation of the camera in the world coordinate frame
    :param front_facing_axis: The axis of the camera frame which faces to the front of the robot, given as a list of x, y, z
    :param world: The BulletWorld if more than one BulletWorld is active
    :return: A list of occluding objects
    """
    world, world_id = _world_and_id(world)
    occ_world = world.copy()
    state = p.saveState(physicsClientId=occ_world.client_id)
    for obj in occ_world.objects:
        if object.get_position_and_orientation() == obj.get_position_and_orientation():
            object = obj
        else:
            p.resetBasePositionAndOrientation(obj.id, [100, 100, 100],
                                              [0, 0, 0, 1], occ_world.client_id)

    world_T_cam = camera_position_and_orientation
    cam_T_point = list(np.multiply(front_facing_axis, 2))
    target_point = p.multiplyTransforms(world_T_cam[0], world_T_cam[1],
                                        cam_T_point, [0, 0, 0, 1])

    seg_mask = _get_seg_mask_for_target(target_point, world_T_cam, occ_world)
    pixels = []
    for i in range(0, 256):
        for j in range(0, 256):
            if seg_mask[i][j] == object.id:
                pixels.append((i, j))
    p.restoreState(state, physicsClientId=occ_world.client_id)

    occluding = []
    seg_mask = _get_seg_mask_for_target(target_point, world_T_cam, occ_world)
    for c in pixels:
        if not seg_mask[c[0]][c[1]] == object.id:
            occluding.append(seg_mask[c[0]][c[1]])

    occ_objects = list(set(map(lambda x: occ_world.get_object_by_id(x), occluding)))
    occ_world.exit()
    return occ_objects
def make_traj(self, goal_pose, shape_class=Rectangle, in_board=False):
    state = p.saveState()
    # quat = p.getLinkState(self.robot, self.grasp_joint)[1]
    quat = [1, 0, 0, 0]
    # all of this only really makes sense with a full robot
    joints_to_plan_for = ut.get_movable_joints(self.robot)[:-2]  # all but the fingers
    end_conf = self.compute_IK(goal_pose, joints_to_plan_for,
                               cur_pose=ut.get_joint_positions(self.robot, joints_to_plan_for))
    if end_conf is None:
        print("No IK solution found")
        p.restoreState(state)
        return None
    end_conf = end_conf[:len(joints_to_plan_for)]
    p.restoreState(state)
    # for attachment in self.in_hand:
    #     attachment.assign()
    obstacles = [
        obs for obs in [
            self.obstacle, self.square, self.circle, self.topcamera_block,
            self.frontcamera_block
        ] if obs is not None and obs != self.shape_name_to_shape[shape_class.__name__]
    ]
    if not in_board:
        obstacles.append(self.board)
    # disabled_body_collisions = [(self.board, self.rectangle), (self.hole, self.rectangle),
    #                             (self.board, self.square),
    #                             (self.robot, self.shape_name_to_shape[shape_class.__name__])]
    disabled_body_collisions = [
        (self.hole, self.rectangle),
        (self.robot, self.shape_name_to_shape[shape_class.__name__])
    ]
    # if len(self.in_hand) == 1:
    #     disabled_collisions.append((self.in_hand[0].child, self.board))
    #     disabled_collisions.append((self.board, self.in_hand[0].child))
    traj = ut.plan_joint_motion(self.robot,
                                joints_to_plan_for,
                                end_conf,
                                obstacles=obstacles,
                                attachments=self.in_hand,
                                self_collisions=True,
                                disabled_body_collisions=disabled_body_collisions,
                                weights=None,
                                resolutions=None,
                                smooth=100,
                                restarts=20,
                                iterations=100)
    p.restoreState(state)
    return traj
def visible(object, camera_position_and_orientation, front_facing_axis, threshold=0.8, world=None):
    """
    This reasoning query checks if an object is visible from a given position.
    This will be achieved by rendering the object alone and counting the visible
    pixels, then rendering the complete scene and comparing the visible pixels
    with the absolute count of pixels.

    :param object: The object for which the visibility should be checked
    :param camera_position_and_orientation: The position and orientation of the camera in the world coordinate frame
    :param front_facing_axis: The axis of the camera frame which faces to the front of the robot, given as a list of x, y, z
    :param threshold: The minimum percentage of the object that needs to be visible for this method to return True
    :param world: The BulletWorld if more than one BulletWorld is active
    :return: True if the object is visible from the camera position, False if not
    """
    world, world_id = _world_and_id(world)
    det_world = world.copy()
    state = p.saveState(physicsClientId=det_world.client_id)
    for obj in det_world.objects:
        if object.get_position_and_orientation() == obj.get_position_and_orientation():
            object = obj
        else:
            p.resetBasePositionAndOrientation(obj.id, [100, 100, 100],
                                              [0, 0, 0, 1], det_world.client_id)

    world_T_cam = camera_position_and_orientation
    cam_T_point = list(np.multiply(front_facing_axis, 2))
    target_point = p.multiplyTransforms(world_T_cam[0], world_T_cam[1],
                                        cam_T_point, [0, 0, 0, 1])

    seg_mask = _get_seg_mask_for_target(target_point, world_T_cam, det_world)
    flat_list = list(itertools.chain.from_iterable(seg_mask))
    max_pixel = sum(list(map(lambda x: 1 if x == object.id else 0, flat_list)))
    p.restoreState(state, physicsClientId=det_world.client_id)

    if max_pixel == 0:
        # Object is not visible
        return False

    seg_mask = _get_seg_mask_for_target(target_point, world_T_cam, world)
    flat_list = list(itertools.chain.from_iterable(seg_mask))
    real_pixel = sum(list(map(lambda x: 1 if x == object.id else 0, flat_list)))

    det_world.exit()
    return real_pixel / max_pixel > threshold > 0
def cost_function(self, tau, des_a, leg_down):
    time_step = 1 / 1000.0  # length of a single time step
    rid = self.robot_id
    state_id = p.saveState()  # save the current system state
    md = p.TORQUE_CONTROL  # torque control mode
    leg_id_li = [0, 1, 2, 4, 5, 6]
    for i in range(6):
        p.setJointMotorControl2(rid, leg_id_li[i], md, tau[i])

    # 1. Get the state at the previous instant
    # body linear and angular velocity
    b_lv, b_av = np.array(p.getBaseVelocity(rid))
    # swing-leg joint velocities
    if leg_down == 'left':
        vel_a = p.getJointState(rid, 4)[1]
        vel_b = p.getJointState(rid, 5)[1]
        vel_c = p.getJointState(rid, 6)[1]
    else:
        vel_a = p.getJointState(rid, 0)[1]
        vel_b = p.getJointState(rid, 1)[1]
        vel_c = p.getJointState(rid, 2)[1]

    # 2. Run one simulation step
    p.stepSimulation()  # advance the simulation by one step of 1/240 s

    # 3. Get the velocities and other state after the simulation step
    # body linear acceleration
    acc_bv = (np.array(p.getBaseVelocity(self.robot_id)[0]) - b_lv) / time_step
    # swing-leg acceleration
    if leg_down == 'left':
        acc_a = (p.getJointState(rid, 0)[1] - vel_a) / time_step
        acc_b = (p.getJointState(rid, 1)[1] - vel_b) / time_step
        acc_c = (p.getJointState(rid, 2)[1] - vel_c) / time_step
    else:
        acc_a = (p.getJointState(rid, 4)[1] - vel_a) / time_step
        acc_b = (p.getJointState(rid, 5)[1] - vel_b) / time_step
        acc_c = (p.getJointState(rid, 6)[1] - vel_c) / time_step
    acc_foot = np.array([acc_a, acc_b, acc_c])
    # body angular acceleration
    acc_ba = (np.array(p.getBaseVelocity(self.robot_id)[1]) - b_av) / time_step

    # 4. Compute the cost against the desired accelerations
    w_com = 25
    w_bod = np.array([20, 60, 14])
    w_foot = 1
    c_com = np.linalg.norm(w_com * (acc_bv - des_a['com']))
    c_bod = np.linalg.norm(w_bod * (acc_ba - des_a['body']))
    c_foo = np.linalg.norm(w_foot * (acc_foot - des_a['foot']))
    # c_tau = 0.001 * sum(abs(tau))  # sum of joint torques
    cost = c_com + c_bod + c_foo

    # Get the ground contact state; pybullet seems to expose only normal forces,
    # not tangential ones
    # contact_list = p.getContactPoints(rid)

    # 5. Restore the state and return the cost
    p.restoreState(state_id)
    return cost
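Because cost_function saves the state, applies the candidate torques for one step, and restores the state, it can be evaluated repeatedly at the same control instant to compare candidate controls. A hedged usage sketch follows; the controller instance (standing in for whatever object defines cost_function above), the desired-acceleration values, and the random-search strategy are all illustrative assumptions, not part of the original code.

import numpy as np

# Desired accelerations for the current control instant (illustrative values).
des_a = {
    'com': np.zeros(3),    # desired body linear acceleration
    'body': np.zeros(3),   # desired body angular acceleration
    'foot': np.zeros(3),   # desired swing-leg joint accelerations
}

# Simple random search: evaluate candidate torque vectors with the
# save/step/restore cost above and keep the cheapest one.
best_tau, best_cost = None, np.inf
for _ in range(64):
    tau = np.random.uniform(-20, 20, size=6)    # candidate joint torques
    cost = controller.cost_function(tau, des_a, leg_down='left')
    if cost < best_cost:
        best_tau, best_cost = tau, cost
print("best cost:", best_cost)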
def _reset(self):
    if (self.stateId >= 0):
        #print("restoreState self.stateId:", self.stateId)
        p.restoreState(self.stateId)

    r = MJCFBaseBulletEnv._reset(self)
    p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)

    self.parts, self.jdict, self.ordered_joints, self.robot_body = self.robot.addToScene(
        self.stadium_scene.ground_plane_mjcf)
    self.ground_ids = set([(self.parts[f].bodies[self.parts[f].bodyIndex],
                            self.parts[f].bodyPartIndex)
                           for f in self.foot_ground_object_names])
    p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)

    if (self.stateId < 0):
        self.stateId = p.saveState()
        #print("saving state self.stateId:", self.stateId)
    return r
coordPos2 = [0, 0, 0]
coordOrnEuler2 = [0, 0, 0]

invPos, invOrn = p.invertTransform(coordPos, p.getQuaternionFromEuler(coordOrnEuler))
mPos, mOrn = p.multiplyTransforms(invPos, invOrn,
                                  coordPos2, p.getQuaternionFromEuler(coordOrnEuler2))

eul = p.getEulerFromQuaternion(mOrn)
print("rpy=\"", eul[0], eul[1], eul[2], "\" xyz=\"", mPos[0], mPos[1], mPos[2])

shift = 0
gui = 1

frame = 0
states = []
states.append(p.saveState())
#observations = []
#observations.append(obs)

running = True
reverting = False
p.getCameraImage(320, 200)  #, renderer=p.ER_BULLET_HARDWARE_OPENGL)
while (1):
    updateCam = 0
    keys = p.getKeyboardEvents()
    for k, v in keys.items():
        if (reverting or (k == p.B3G_LEFT_ARROW and (v & p.KEY_WAS_TRIGGERED))):
            reverting = True
            stateIndex = len(states) - 1
print(q)

for i in range(numSteps2):
    p.stepSimulation()

file = open("saveFile.txt", "w")
dumpStateToFile(file)
file.close()

#################################
setupWorld()

#both restore from file or from in-memory state should work
p.restoreState(fileName="state.bullet")
stateId = p.saveState()

if verbose:
    p.setInternalSimFlags(1)
p.stepSimulation()
if verbose:
    p.setInternalSimFlags(0)
    print("contact points restored=")
    for q in p.getContactPoints():
        print(q)

for i in range(numSteps2):
    p.stepSimulation()

file = open("restoreFile.txt", "w")
dumpStateToFile(file)
def testSaveRestoreState(self):
    numSteps = 500
    numSteps2 = 30
    verbose = 0

    self.setupWorld()
    for i in range(numSteps):
        p.stepSimulation()
    p.saveBullet("state.bullet")
    if verbose:
        p.setInternalSimFlags(1)
    p.stepSimulation()
    if verbose:
        p.setInternalSimFlags(0)
        print("contact points=")
        for q in p.getContactPoints():
            print(q)
    for i in range(numSteps2):
        p.stepSimulation()
    file = open("saveFile.txt", "w")
    self.dumpStateToFile(file)
    file.close()

    #################################
    self.setupWorld()

    #both restore from file or from in-memory state should work
    p.restoreState(fileName="state.bullet")
    stateId = p.saveState()
    if verbose:
        p.setInternalSimFlags(1)
    p.stepSimulation()
    if verbose:
        p.setInternalSimFlags(0)
        print("contact points restored=")
        for q in p.getContactPoints():
            print(q)
    for i in range(numSteps2):
        p.stepSimulation()
    file = open("restoreFile.txt", "w")
    self.dumpStateToFile(file)
    file.close()

    p.restoreState(stateId)
    if verbose:
        p.setInternalSimFlags(1)
    p.stepSimulation()
    if verbose:
        p.setInternalSimFlags(0)
        print("contact points restored=")
        for q in p.getContactPoints():
            print(q)
    for i in range(numSteps2):
        p.stepSimulation()
    file = open("restoreFile2.txt", "w")
    self.dumpStateToFile(file)
    file.close()

    file1 = open("saveFile.txt", "r")
    file2 = open("restoreFile.txt", "r")
    self.compareFiles(file1, file2)
    file1.close()
    file2.close()

    file1 = open("saveFile.txt", "r")
    file2 = open("restoreFile2.txt", "r")
    self.compareFiles(file1, file2)
    file1.close()
    file2.close()
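The two snippets above exercise both snapshot mechanisms: p.saveBullet writes a .bullet file that can be loaded later with p.restoreState(fileName=...), while p.saveState keeps the snapshot in memory and returns an id for p.restoreState(stateId). A condensed, self-contained sketch of the two paths (plane and cube URDFs are illustrative):

import pybullet as p
import pybullet_data

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")
cube = p.loadURDF("cube.urdf", [0, 0, 2])
p.setGravity(0, 0, -10)

for _ in range(100):
    p.stepSimulation()

p.saveBullet("state.bullet")               # on-disk snapshot
state_id = p.saveState()                   # in-memory snapshot

for _ in range(100):                       # keep simulating past the snapshot
    p.stepSimulation()

p.restoreState(fileName="state.bullet")    # restore from the file ...
p.restoreState(state_id)                   # ... or from the in-memory id
print(p.getBasePositionAndOrientation(cube)[0])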