def test_multiagent():
    """Import three Turtlebots into a stadium scene, step 100 times, and
    check that pybullet tracks 7 bodies (scene bodies plus the 3 robots).

    NOTE(review): relies on a module-level ``config`` — confirm it is
    defined earlier in this file.
    """
    s = Simulator(mode='headless')
    # Disconnect even if an assertion or import fails mid-test, so a leaked
    # pybullet connection cannot break subsequent tests (matches the
    # try/finally pattern used by test_import_rbo_object).
    try:
        scene = StadiumScene()
        s.import_scene(scene)
        turtlebot1 = Turtlebot(config)
        turtlebot2 = Turtlebot(config)
        turtlebot3 = Turtlebot(config)
        s.import_robot(turtlebot1)
        s.import_robot(turtlebot2)
        s.import_robot(turtlebot3)
        turtlebot1.set_position([1, 0, 0.5])
        turtlebot2.set_position([0, 0, 0.5])
        turtlebot3.set_position([-1, 0, 0.5])
        # Snapshot the body count before stepping; stepping should not
        # add or remove bodies.
        nbody = p.getNumBodies()
        for _ in range(100):
            s.step()
    finally:
        s.disconnect()
    assert nbody == 7
def test_import_rbo_object():
    """Load two RBO articulated objects and a door URDF, then simulate briefly."""
    sim = Simulator(mode='headless')
    try:
        sim.import_scene(StadiumScene())

        book = RBOObject('book')
        sim.import_articulated_object(book)

        microwave = RBOObject('microwave')
        sim.import_articulated_object(microwave)

        book.set_position([0, 0, 2])
        microwave.set_position([0, 1, 2])

        door_urdf = os.path.join(
            gibson2.assets_path, 'models', 'scene_components', 'door.urdf')
        door = InteractiveObj(door_urdf)
        sim.import_articulated_object(door)

        for _ in range(100):
            sim.step()
    finally:
        # Always release the pybullet connection, even on failure.
        sim.disconnect()
def test_import_box():
    """Build a walled arena out of box shapes, drop in two Turtlebots, and step.

    Uses try/finally so the pybullet connection is released even when an
    import or step fails mid-test (consistent with test_import_rbo_object).
    """
    s = Simulator(mode='headless')
    try:
        scene = StadiumScene()
        s.import_scene(scene)
        print(s.objects)
        # Each entry is [position, half-extents] for a BoxShape.
        wall = [[[0, 7, 1.01], [10, 0.2, 1]],
                [[0, -7, 1.01], [6.89, 0.1, 1]],
                [[7, -1.5, 1.01], [0.1, 5.5, 1]],
                [[-7, -1, 1.01], [0.1, 6, 1]],
                [[-8.55, 5, 1.01], [1.44, 0.1, 1]],
                [[8.55, 4, 1.01], [1.44, 0.1, 1]]]
        obstacles = [[[-0.5, 2, 1.01], [3.5, 0.1, 1]],
                     [[4.5, -1, 1.01], [1.5, 0.1, 1]],
                     [[-4, -2, 1.01], [0.1, 2, 1]],
                     [[2.5, -4, 1.01], [1.5, 0.1, 1]]]
        # Import walls first, then obstacles (same order as the original
        # two index-based loops), unpacking directly instead of indexing.
        for pos, dim in wall + obstacles:
            s.import_object(BoxShape(pos, dim))

        config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
        turtlebot1 = Turtlebot(config)
        turtlebot2 = Turtlebot(config)
        s.import_robot(turtlebot1)
        s.import_robot(turtlebot2)
        turtlebot1.set_position([6., -6., 0.])
        turtlebot2.set_position([-3., 4., 0.])
        for _ in range(100):
            s.step()
    finally:
        s.disconnect()
class BaseEnv(gym.Env):
    """A basic environment; step, observation and reward are not implemented.

    Subclasses must override :meth:`step` and :meth:`reset`.
    """

    def __init__(self,
                 config_file,
                 model_id=None,
                 mode='headless',
                 action_timestep=1 / 10.0,
                 physics_timestep=1 / 240.0,
                 device_idx=0):
        """
        :param config_file: config_file path
        :param model_id: override model_id in config file
        :param mode: headless or gui mode
        :param action_timestep: environment executes action per action_timestep second
        :param physics_timestep: physics timestep for pybullet
        :param device_idx: which GPU to run the simulation and rendering on
        """
        self.config = parse_config(config_file)
        if model_id is not None:
            self.config['model_id'] = model_id
        self.mode = mode
        self.action_timestep = action_timestep
        self.physics_timestep = physics_timestep
        self.simulator = Simulator(
            mode=mode,
            timestep=physics_timestep,
            use_fisheye=self.config.get('fisheye', False),
            image_width=self.config.get('image_width', 128),
            image_height=self.config.get('image_height', 128),
            vertical_fov=self.config.get('vertical_fov', 90),
            device_idx=device_idx,
            auto_sync=False)
        # Number of physics steps executed per environment action.
        self.simulator_loop = int(self.action_timestep / self.simulator.timestep)
        self.load()

    def reload(self, config_file):
        """
        Reload another config file; this allows one to change the environment on the fly.

        :param config_file: new config file path
        """
        self.config = parse_config(config_file)
        self.simulator.reload()
        self.load()

    def reload_model(self, model_id):
        """
        Reload another model; this allows one to change the environment on the fly.

        :param model_id: new model_id
        """
        self.config['model_id'] = model_id
        self.simulator.reload()
        self.load()

    def load(self):
        """
        Load the scene and robot specified by ``self.config``.

        :raises Exception: if the scene or robot type is unknown
        """
        if self.config['scene'] == 'empty':
            scene = EmptyScene()
        elif self.config['scene'] == 'stadium':
            scene = StadiumScene()
        elif self.config['scene'] == 'building':
            scene = BuildingScene(
                self.config['model_id'],
                waypoint_resolution=self.config.get('waypoint_resolution', 0.2),
                num_waypoints=self.config.get('num_waypoints', 10),
                build_graph=self.config.get('build_graph', False),
                trav_map_resolution=self.config.get('trav_map_resolution', 0.1),
                trav_map_erosion=self.config.get('trav_map_erosion', 2),
                is_interactive=self.config.get('is_interactive', False),
                pybullet_load_texture=self.config.get('pybullet_load_texture', False),
            )
        else:
            # Previously an unknown scene type fell through and crashed later
            # with an UnboundLocalError; fail fast with a clear message.
            raise Exception('unknown scene type: {}'.format(self.config['scene']))
        self.simulator.import_scene(
            scene, load_texture=self.config.get('load_texture', True))

        # Map the config robot name to its class (replaces the long if/elif chain).
        robot_classes = {
            'Turtlebot': Turtlebot,
            'Husky': Husky,
            'Ant': Ant,
            'Humanoid': Humanoid,
            'JR2': JR2,
            'JR2_Kinova': JR2_Kinova,
            'Freight': Freight,
            'Fetch': Fetch,
            'Locobot': Locobot,
        }
        robot_name = self.config['robot']
        if robot_name not in robot_classes:
            raise Exception('unknown robot type: {}'.format(robot_name))
        robot = robot_classes[robot_name](self.config)

        self.scene = scene
        self.robots = [robot]
        for robot in self.robots:
            self.simulator.import_robot(robot)

    def clean(self):
        """
        Clean up: disconnect the simulator if it exists.
        """
        if self.simulator is not None:
            self.simulator.disconnect()

    def simulator_step(self):
        """
        Step the simulation. This is different from environment step, where
        one can also get observation and reward.
        """
        self.simulator.step()

    def step(self, action):
        """Overwritten by subclasses."""
        # Bug fix: the original did `return NotImplementedError()`, which
        # silently hands callers an exception *instance* instead of raising.
        raise NotImplementedError()

    def reset(self):
        """Overwritten by subclasses."""
        raise NotImplementedError()

    def set_mode(self, mode):
        # Switch the simulator mode (e.g. between 'gui' and 'headless').
        self.simulator.mode = mode
class BaseEnv(gym.Env):
    """A basic environment; step, observation and reward are not implemented.

    Subclasses must override :meth:`step` and :meth:`reset`.
    """

    def __init__(self, config_file, model_id=None, mode='headless', device_idx=0):
        """
        :param config_file: config_file path
        :param model_id: override model_id in config file
        :param mode: headless or gui mode
        :param device_idx: which GPU to run the simulation and rendering on
        """
        self.config = parse_config(config_file)
        if model_id is not None:
            self.config['model_id'] = model_id
        self.simulator = Simulator(
            mode=mode,
            use_fisheye=self.config.get('fisheye', False),
            resolution=self.config.get('resolution', 64),
            fov=self.config.get('fov', 90),
            device_idx=device_idx)
        self.load()

    def reload(self, config_file):
        """
        Reload another config file; this allows one to change the environment on the fly.

        :param config_file: new config file path
        """
        self.config = parse_config(config_file)
        self.simulator.reload()
        self.load()

    def load(self):
        """
        Load the scene and robot specified by ``self.config``.

        :raises Exception: if the scene or robot type is unknown
        """
        if self.config['scene'] == 'stadium':
            scene = StadiumScene()
        elif self.config['scene'] == 'building':
            scene = BuildingScene(
                self.config['model_id'],
                build_graph=self.config.get('build_graph', False),
                trav_map_erosion=self.config.get('trav_map_erosion', 2),
                should_load_replaced_objects=self.config.get(
                    'should_load_replaced_objects', False))
        else:
            # Previously an unknown scene type fell through and crashed later
            # with an UnboundLocalError; fail fast with a clear message.
            raise Exception('unknown scene type: {}'.format(self.config['scene']))

        # Semantic class ids: scene = 0, robot = 1, objects > 1.
        self.simulator.import_scene(scene,
                                    load_texture=self.config.get(
                                        'load_texture', True),
                                    class_id=0)

        if self.config['robot'] == 'Turtlebot':
            robot = Turtlebot(self.config)
        elif self.config['robot'] == 'Husky':
            robot = Husky(self.config)
        elif self.config['robot'] == 'Ant':
            robot = Ant(self.config)
        elif self.config['robot'] == 'Humanoid':
            robot = Humanoid(self.config)
        elif self.config['robot'] == 'JR2':
            robot = JR2(self.config)
        elif self.config['robot'] == 'JR2_Kinova':
            robot = JR2_Kinova(self.config)
        elif self.config['robot'] == 'Freight':
            robot = Freight(self.config)
        elif self.config['robot'] == 'Fetch':
            robot = Fetch(self.config)
        else:
            raise Exception('unknown robot type: {}'.format(
                self.config['robot']))

        self.scene = scene
        self.robots = [robot]
        for robot in self.robots:
            self.simulator.import_robot(robot, class_id=1)

    def clean(self):
        """
        Clean up: disconnect the simulator if it exists.
        """
        if self.simulator is not None:
            self.simulator.disconnect()

    def simulator_step(self):
        """
        Step the simulation. This is different from environment step, where
        one can also get observation and reward.
        """
        self.simulator.step()

    def step(self, action):
        """Overwritten by subclasses."""
        # Bug fix: the original did `return NotImplementedError`, handing the
        # caller the exception *class* as a value instead of raising it.
        raise NotImplementedError()

    def reset(self):
        """Overwritten by subclasses."""
        raise NotImplementedError()

    def set_mode(self, mode):
        # Switch the simulator mode (e.g. between 'gui' and 'headless').
        self.simulator.mode = mode
def rollout(traj, headless=False):
    """Run one navigation rollout: encode the instruction, spawn a Turtlebot
    in the given Gibson scene, then repeatedly render, build panoramic image
    features, query the policy model, and apply the selected action until it
    stops (or forever when not headless — see note on the loop condition).

    :param traj: tuple (SCENE, traj_id, instr_id, instr, starting_coords)
    :param headless: when False, sleeps between actions and keeps looping
    """
    SCENE, traj_id, instr_id, instr, starting_coords = traj
    # instr = "Keep going and don't stop"
    # starting_coords = [0, 0, 0]
    seq = tok.encode_sentence(instr)
    tokens = tok.split_sentence(instr)
    # Sequence length = index of the first padding token.
    seq_lengths = [np.argmax(seq == padding_idx, axis=0)]
    seq = torch.from_numpy(np.expand_dims(seq, 0)).cuda()
    # seq_lengths[seq_lengths == 0] = seq.shape[1]  # Full length
    ctx, h_t, c_t, ctx_mask = encoder(seq, seq_lengths)
    question = h_t
    pre_feat = torch.zeros(batch_size, opts.img_feat_input_dim).cuda()
    pre_ctx_attend = torch.zeros(batch_size, opts.rnn_hidden_size).cuda()
    # Gibson stuff
    # 72 fov for 600, 60 for 480
    # mode = gui for debug, headless for run
    # NOTE(review): mode is hard-coded to 'gui' — the `headless` parameter is
    # never used to select the simulator mode; confirm intent.
    s = Simulator(mode='gui', resolution=640, fov=75, panorama=True)
    scene = BuildingScene(SCENE)
    # scene = StadiumScene()
    ids = s.import_scene(scene)
    robot = Turtlebot(config)
    ped_id = s.import_robot(robot)
    heading_feat_tensor = torch.Tensor(
        heading_elevation_feat()).view([im_per_ob, 128]).cuda()
    s.step()
    robot.set_position(starting_coords)

    def apply_action(bot: Turtlebot, action_idx: int, depth_ok: list,
                     headless=False) -> bool:
        """Apply one discrete action; return True when the agent stops.

        action_idx 0 or >12, or a blocked direction, means STOP; otherwise
        turn right by (action_idx-1)*30 degrees and move forward 0.5m.
        """
        print(action_idx)
        # action_idx is expected to be 0-13, TODO: make nicer...
        if action_idx == 0 or action_idx > 12 or not depth_ok[action_idx - 1]:
            print("STOP")
            return True
        action_idx -= 1
        #if action_idx < 3 or (action_idx < 12 and action_idx > 9):
        # 0.5235988 rad = 30 degrees per discrete heading slot.
        bot.turn_right(0.5235988 * action_idx)
        s.step()
        if(not headless):
            time.sleep(0.2)
        bot.move_forward(0.5)
        return False
        # else:
        #     if action_idx < 7:
        #         bot.turn_left(1.57)
        #     else:
        #         bot.turn_right(1.57)

    bot_is_running = True
    while bot_is_running:
        s.step()
        # Interleaved rgb/3d renders: even indices are rgb, odd are 3d.
        gib_out = s.renderer.render_robot_cameras(modes=('rgb', '3d'))
        rgb = gib_out[::2]
        depth = np.array(gib_out[1::2])
        processed_rgb = list(map(transform_img, rgb))
        batch_obs = np.concatenate(processed_rgb)
        imgnet_input = torch.Tensor(batch_obs).cuda()
        imgnet_output = torch.zeros([im_per_ob, 2048]).cuda()
        # depth processing and filtering
        # depth: [36, ]
        # Euclidean distance from the xyz channels of the 3d render.
        depth *= depth
        depth = depth[:, :, :, :3].sum(axis=3)
        depth = np.sqrt(depth)
        # filter out 0 distances that are presumably from infinity dist
        depth[depth < 0.0001] = 10
        # TODO: generalize to non-horizontal moves
        # Min distance in a central crop of the 12 horizontal views (12:24).
        depth_ok = depth[12:24, 200:440, 160:480].min(axis=2).min(axis=1)
        fig=plt.figure(figsize=(8, 2))
        for n, i in enumerate([0, 3, 6, 9]):
            fig.add_subplot(1, 4, n + 1)
            plt.imshow(depth[12 + i])
        plt.show()
        # depth_ok *= depth_ok > 1
        print(depth_ok)
        # A direction is navigable when at least 0.8m of clearance remains.
        depth_ok = depth_ok > 0.8
        print(depth_ok)
        # Each observation has 36 inputs
        # We pass rgb images through frozen embedder
        # NOTE(review): hook_fn is defined but never registered on any module,
        # and the bare slice expression below is a no-op — imgnet_output stays
        # all zeros here. The commented-out resnet line suggests the embedding
        # step was disabled; confirm whether this is intentional.
        for i in range(im_per_ob // B_S):
            def hook_fn(m, last_input, o):
                imgnet_output[i*B_S:(i+1)*B_S, :] = \
                    o.detach().squeeze(2).squeeze(2)
            imgnet_input[B_S * i : B_S * (i + 1)]
            # imgnet_output[B_S * i : B_S * (i + 1)] = resnet(minibatch).detach()
        # Append 128-dim heading/elevation features -> 2176-dim per view.
        imgnet_output = torch.cat([imgnet_output, heading_feat_tensor], 1)
        pano_img_feat = imgnet_output.view([1, im_per_ob, 2176])
        navigable_feat = torch.zeros([1, 16, 2176]).cuda()
        # Slots 1..12 hold the 12 horizontal views, zeroed where blocked.
        navigable_feat[0, 1:13] = imgnet_output[12:24] * \
            torch.Tensor(depth_ok).cuda().view(12, 1)
        # TODO: make nicer as stated above
        navigable_index = [list(map(int, depth_ok))]
        print(navigable_index)
        # NB: depth_ok replaces navigable_index
        h_t, c_t, pre_ctx_attend, img_attn, ctx_attn, logit, value, navigable_mask = model(
            pano_img_feat, navigable_feat, pre_feat, question, h_t, c_t, ctx,
            pre_ctx_attend, navigable_index, ctx_mask)
        print("ATTN")
        print(ctx_attn[0])
        print(img_attn[0])
        plt.bar(range(len(tokens)), ctx_attn.detach().cpu()[0][:len(tokens)])
        plt.xticks(range(len(tokens)), tokens)
        plt.show()
        plt.bar(range(16), img_attn.detach().cpu()[0])
        plt.show()
        print("NMASK")
        print(navigable_mask)
        # Mask out non-navigable logits before action selection.
        logit.data.masked_fill_((navigable_mask == 0).data, -float('inf'))
        # NOTE(review): torch.Tensor(...) does not accept a dtype kwarg —
        # this line will raise TypeError; torch.tensor(..., dtype=torch.bool)
        # is presumably what was meant. Confirm before running.
        m = torch.Tensor([[False] + list(map(lambda b: not b,
            navigable_index[0])) + [False, False, False]], dtype=bool).cuda()
        logit.data.masked_fill_(m, -float('inf'))
        action = _select_action(logit, [False])
        ended = apply_action(robot, action[0], depth_ok)
        # NOTE(review): when headless is False this keeps looping even after
        # the agent has ended; the simulator is never disconnected. Confirm.
        bot_is_running = not ended or not headless
        if not headless:
            time.sleep(.3)
import yaml
from gibson2.core.physics.robot_locomotors import Turtlebot
from gibson2.core.simulator import Simulator
from gibson2.core.physics.scene import BuildingScene, StadiumScene
from gibson2.utils.utils import parse_config
import pytest
import pybullet as p
import numpy as np
from gibson2.core.render.profiler import Profiler

# Benchmark: drive a Turtlebot through the 'Bolton' building scene for
# 2000 steps, profiling each simulator step and rendering the rgb camera.
config = parse_config('../configs/turtlebot_p2p_nav.yaml')
sim = Simulator(mode='gui', resolution=512, render_to_tensor=False)

building = BuildingScene('Bolton')
sim.import_scene(building)

bot = Turtlebot(config)
sim.import_robot(bot)

# Drop the MJCF ground plane to z = -10 (presumably to keep it clear of the
# building mesh — confirm against the scene setup).
p.resetBasePositionAndOrientation(building.ground_plane_mjcf[0],
                                  posObj=[0, 0, -10],
                                  ornObj=[0, 0, 0, 1])

for _ in range(2000):
    with Profiler('simulator step'):
        bot.apply_action([0.1, 0.1])
        sim.step()
        rgb = sim.renderer.render_robot_cameras(modes=('rgb'))

sim.disconnect()
class BaseEnv(gym.Env):
    """A basic environment; step, observation and reward are not implemented.

    Subclasses must override :meth:`step` and :meth:`reset`.
    """

    def __init__(self, config_file, mode='headless', device_idx=0):
        """
        :param config_file: config_file path
        :param mode: headless or gui mode
        :param device_idx: which GPU to run the simulation and rendering on
        """
        self.config = parse_config(config_file)
        self.simulator = Simulator(mode=mode,
                                   use_fisheye=self.config.get('fisheye', False),
                                   resolution=self.config['resolution'],
                                   device_idx=device_idx)
        self.load()

    def reload(self, config_file):
        """
        Reload another config file; this allows one to change the environment on the fly.

        :param config_file: new config file path
        """
        self.config = parse_config(config_file)
        self.simulator.reload()
        self.load()

    def load(self):
        """
        Load the scene and robot specified by ``self.config``.

        :raises Exception: if the scene or robot type is unknown
        """
        if self.config['scene'] == 'stadium':
            scene = StadiumScene()
        elif self.config['scene'] == 'building':
            scene = BuildingScene(self.config['model_id'])
        else:
            # Previously an unknown scene type fell through and crashed later
            # with an UnboundLocalError; fail fast with a clear message.
            raise Exception('unknown scene type: {}'.format(self.config['scene']))
        self.simulator.import_scene(scene)

        if self.config['robot'] == 'Turtlebot':
            robot = Turtlebot(self.config)
        elif self.config['robot'] == 'Husky':
            robot = Husky(self.config)
        elif self.config['robot'] == 'Ant':
            robot = Ant(self.config)
        elif self.config['robot'] == 'Humanoid':
            robot = Humanoid(self.config)
        elif self.config['robot'] == 'JR2':
            robot = JR2(self.config)
        elif self.config['robot'] == 'JR2_Kinova':
            robot = JR2_Kinova(self.config)
        else:
            raise Exception('unknown robot type: {}'.format(
                self.config['robot']))

        self.scene = scene
        self.robots = [robot]
        for robot in self.robots:
            self.simulator.import_robot(robot)

    def clean(self):
        """
        Clean up: disconnect the simulator if it exists.
        """
        # Idiom fix: `self.simulator is not None`, not `not ... is None`.
        if self.simulator is not None:
            self.simulator.disconnect()

    def simulator_step(self):
        """
        Step the simulation. This is different from environment step, where
        one can also get observation and reward.
        """
        self.simulator.step()

    def step(self, action):
        """Overwritten by subclasses."""
        # Bug fix: the original did `return NotImplementedError`, handing the
        # caller the exception *class* as a value instead of raising it.
        raise NotImplementedError()

    def reset(self):
        """Overwritten by subclasses."""
        raise NotImplementedError()

    def set_mode(self, mode):
        # Switch the simulator mode (e.g. between 'gui' and 'headless').
        self.simulator.mode = mode
def main():
    """Interactive inverse-kinematics demo for the Fetch robot.

    Spawns Fetch in an empty scene and runs an endless loop that solves IK
    for the gripper toward a target point [x, y, z], which the user moves
    with the arrow keys and 'z'/'x'. A small sphere marker tracks the target.
    """
    config = parse_config('../configs/fetch_p2p_nav.yaml')
    s = Simulator(mode='gui', timestep=1 / 240.0)
    scene = EmptyScene()
    s.import_scene(scene)
    fetch = Fetch(config)
    s.import_robot(fetch)
    robot_id = fetch.robot_ids[0]
    # The 8 arm DOFs used for IK (torso lift + 7 arm joints).
    arm_joints = joints_from_names(robot_id, [
        'torso_lift_joint', 'shoulder_pan_joint', 'shoulder_lift_joint',
        'upperarm_roll_joint', 'elbow_flex_joint', 'forearm_roll_joint',
        'wrist_flex_joint', 'wrist_roll_joint'
    ])
    #finger_joints = joints_from_names(robot_id, ['l_gripper_finger_joint', 'r_gripper_finger_joint'])
    fetch.robot_body.reset_position([0, 0, 0])
    fetch.robot_body.reset_orientation([0, 0, 1, 0])
    # Initial IK target = current end-effector position.
    x, y, z = fetch.get_end_effector_position()
    #set_joint_positions(robot_id, finger_joints, [0.04,0.04])
    print(x, y, z)
    # Visual-only sphere marking the IK target.
    visual_marker = p.createVisualShape(p.GEOM_SPHERE, radius=0.02)
    marker = p.createMultiBody(baseVisualShapeIndex=visual_marker)
    #max_limits = [0,0] + get_max_limits(robot_id, arm_joints) + [0.05,0.05]
    #min_limits = [0,0] + get_min_limits(robot_id, arm_joints) + [0,0]
    #rest_position = [0,0] + list(get_joint_positions(robot_id, arm_joints)) + [0.04,0.04]
    # Leading [0, 0] pads for the two base joints that precede the arm in
    # pybullet's full joint list — presumably; confirm against the URDF.
    max_limits = [0, 0] + get_max_limits(robot_id, arm_joints)
    min_limits = [0, 0] + get_min_limits(robot_id, arm_joints)
    rest_position = [0, 0] + list(get_joint_positions(robot_id, arm_joints))
    joint_range = list(np.array(max_limits) - np.array(min_limits))
    joint_range = [item + 1 for item in joint_range]
    # Uniform joint damping for the IK solver.
    jd = [0.1 for item in joint_range]
    print(max_limits)
    print(min_limits)

    def accurateCalculateInverseKinematics(robotid, endEffectorId, targetPos,
                                           threshold, maxIter):
        """Iterate pybullet IK from a random seed until the end effector is
        within `threshold` of `targetPos` or `maxIter` iterations elapse.

        Returns the full jointPoses tuple from calculateInverseKinematics;
        indices [2:10] correspond to `arm_joints`.
        NOTE(review): if maxIter == 0, `dist` (and `jointPoses`) are unbound
        at the print/return below.
        """
        # Random restart: seed the arm at a random sampled configuration.
        sample_fn = get_sample_fn(robotid, arm_joints)
        set_joint_positions(robotid, arm_joints, sample_fn())
        it = 0
        while it < maxIter:
            jointPoses = p.calculateInverseKinematics(robotid,
                                                      endEffectorId,
                                                      targetPos,
                                                      lowerLimits=min_limits,
                                                      upperLimits=max_limits,
                                                      jointRanges=joint_range,
                                                      restPoses=rest_position,
                                                      jointDamping=jd)
            set_joint_positions(robotid, arm_joints, jointPoses[2:10])
            # getLinkState()[4] is the link's world frame position.
            ls = p.getLinkState(robotid, endEffectorId)
            newPos = ls[4]
            dist = np.linalg.norm(np.array(targetPos) - np.array(newPos))
            if dist < threshold:
                break
            it += 1
        print("Num iter: " + str(it) + ", threshold: " + str(dist))
        return jointPoses

    while True:
        with Profiler("Simulation step"):
            # Re-pin the base each frame so IK motion cannot drift the robot.
            fetch.robot_body.reset_position([0, 0, 0])
            fetch.robot_body.reset_orientation([0, 0, 1, 0])
            threshold = 0.01
            maxIter = 100
            joint_pos = accurateCalculateInverseKinematics(
                robot_id, fetch.parts['gripper_link'].body_part_index,
                [x, y, z], threshold, maxIter)[2:10]
            #set_joint_positions(robot_id, finger_joints, [0.04, 0.04])
            s.step()
            # Arrow keys move the target in x/y; 'z'/'x' move it in z.
            keys = p.getKeyboardEvents()
            for k, v in keys.items():
                if (k == p.B3G_RIGHT_ARROW and (v & p.KEY_IS_DOWN)):
                    x += 0.01
                if (k == p.B3G_LEFT_ARROW and (v & p.KEY_IS_DOWN)):
                    x -= 0.01
                if (k == p.B3G_UP_ARROW and (v & p.KEY_IS_DOWN)):
                    y += 0.01
                if (k == p.B3G_DOWN_ARROW and (v & p.KEY_IS_DOWN)):
                    y -= 0.01
                if (k == ord('z') and (v & p.KEY_IS_DOWN)):
                    z += 0.01
                if (k == ord('x') and (v & p.KEY_IS_DOWN)):
                    z -= 0.01
            p.resetBasePositionAndOrientation(marker, [x, y, z], [0, 0, 0, 1])
    # NOTE(review): unreachable — the `while True` above never exits.
    s.disconnect()