def switch_model(self, model_selection):
    # Pick the next model round-robin or at random, then refresh the cached
    # locations, floor height, and (optionally) the map renderer.
    if model_selection == 'in_order':
        self.model_index = (self.model_index + 1) % len(self.model_ids)
        self.model_id = self.model_ids[self.model_index]
        self.model_path = get_model_path(self.model_id)
    elif model_selection == 'random':
        self.model_id = np.random.choice(self.model_ids)
        self.model_path = get_model_path(self.model_id)
    self.locations = self.all_locations[self.model_id]
    self.n_locations = self.locations.shape[0]
    self.default_z = self.z_coordinates[self.model_id]
    if self.render_map:
        mesh_file = os.path.join(get_model_path(self.model_id), "mesh_z_up.obj")
        self.map_renderer = NavigationMapRenderer(mesh_file, self.default_z, 0.1,
                                                  render_resolution=self.render_resolution)
def __init__(self, config, gpu_idx, scene_type, tracking_camera):
    ## The following properties are already instantiated inside xxx_env.py:
    BaseRobotEnv.__init__(self, config, tracking_camera, scene_type, gpu_idx)

    if self.gui:
        self.screen_arr = np.zeros([512, 512, 3])

    self.test_env = "TEST_ENV" in os.environ.keys() and os.environ['TEST_ENV'] == "True"
    self._use_filler = config["use_filler"]
    self._require_camera_input = 'rgb_filled' in self.config["output"] or \
                                 'rgb_prefilled' in self.config["output"] or \
                                 'depth' in self.config["output"] or \
                                 'normal' in self.config["output"] or \
                                 'semantics' in self.config["output"]
    self._require_rgb = 'rgb_filled' in self.config["output"] or "rgb_prefilled" in self.config["output"]
    self._require_depth = 'depth' in self.config["output"]
    self._require_normal = 'normal' in self.config["output"]
    self._require_semantics = 'semantics' in self.config["output"]
    self._semantic_source = 1
    self._semantic_color = 1
    if self._require_semantics:
        assert "semantic_source" in self.config.keys(), "semantic_source not specified in configuration"
        assert "semantic_color" in self.config.keys(), "semantic_color not specified in configuration"
        assert self.config["semantic_source"] in [1, 2], "semantic_source not valid"
        assert self.config["semantic_color"] in [1, 2, 3], "semantic_color not valid"
        self._semantic_source = self.config["semantic_source"]
        self._semantic_color = self.config["semantic_color"]

    #if self._require_camera_input:
    self.model_path = get_model_path(self.model_id)

    self.save_frame = 0
    self.fps = 0
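# Illustrative config fragment (a sketch; the values are assumptions, not the
# repo's defaults). With "semantics" in the output list, the assertions above
# require semantic_source in {1, 2} and semantic_color in {1, 2, 3}.
example_config = {
    "output": ["rgb_filled", "depth", "semantics"],
    "use_filler": True,
    "semantic_source": 1,
    "semantic_color": 3,
}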
def __init__(self, config, gpu_count=0, texture=True, valid_locations=None, render_map=True, fixed_endpoints=False):
    HuskyNavigateEnv.__init__(self, config, gpu_count)
    self.use_valid_locations = valid_locations is not None
    self.fixed_endpoints = fixed_endpoints
    if self.use_valid_locations:
        print("Using valid locations!")
        self.valid_locations = self.get_valid_locations(valid_locations)
        self.n_locations = self.valid_locations.shape[0]
    self.target_distance_mu = self.config["target_distance_mu"]
    self.target_distance_sigma = self.config["target_distance_sigma"]
    self.min_target_x, self.max_target_x = self.config["x_target_range"]
    self.min_target_y, self.max_target_y = self.config["y_target_range"]
    self.min_agent_x, self.max_agent_x = self.config["x_boundary"]
    self.min_agent_y, self.max_agent_y = self.config["y_boundary"]
    self.min_spawn_x, self.max_spawn_x = self.config["x_spawn_range"]
    self.min_spawn_y, self.max_spawn_y = self.config["y_spawn_range"]
    self.default_z = self.config["initial_pos"][2]
    self.cube_size = 0.2
    self.target_radius = 0.5
    self.resolution = self.config["resolution"]
    self.cube_image = np.zeros((self.config["resolution"], self.config["resolution"], 3), np.uint32)
    self.target_x = np.random.uniform(self.min_target_x, self.max_target_x)
    self.target_y = np.random.uniform(self.min_target_y, self.max_target_y)
    self.min_target_distance = self.config["min_target_distance"]
    self.max_target_distance = self.config["max_target_distance"]
    self.use_texture = False
    if texture:
        self.use_texture = True
        self.texture_path = os.path.join(os.path.dirname(os.path.abspath(assets.__file__)), "wood.jpg")
        self.load_texture(self.texture_path)
    self.render_map = render_map
    if render_map:
        mesh_file = os.path.join(get_model_path(self.config["model_id"]), "mesh_z_up.obj")
        self.map_renderer = NavigationMapRenderer(mesh_file, self.default_z, 0.1,
                                                  render_resolution=self.resolution)
def run_depth_render():
    model_path = get_model_path()[0]
    dr_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'depth_render')
    os.chdir(dr_path)
    print("./depth_render --modelpath {}".format(model_path))
    os.system("./depth_render --modelpath {}".format(model_path))
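# Hedged alternative (a sketch, not part of the original code): run_depth_render
# blocks on os.system and changes the process-wide working directory. A caller
# that wants the renderer running alongside the simulation could launch the
# binary without blocking, using cwd= instead of os.chdir. The helper name and
# this usage are assumptions.
import os
import subprocess

def start_depth_render_async(model_path):
    dr_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'depth_render')
    # Popen returns immediately; the caller is responsible for terminating the child.
    return subprocess.Popen(["./depth_render", "--modelpath", model_path], cwd=dr_path)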
def __init__(self, config, gpu_count=0, start_locations=None, render_map=True, fixed_endpoints=False):
    HuskyNavigateEnv.__init__(self, config, gpu_count)
    self.fixed_endpoints = fixed_endpoints
    self.default_z = self.config["initial_pos"][2]
    self.target_mu = self.config["target_distance_mu"]
    self.target_sigma = self.config["target_distance_sigma"]
    self.locations = self.get_valid_locations(start_locations)
    self.n_locations = self.locations.shape[0]
    self.start_location = self.select_agent_location()
    self.config["initial_pos"] = [self.start_location[0], self.start_location[1], self.default_z]
    self.target_location = self.select_target()
    self.target_radius = 0.5
    self.render_map = render_map
    self.render_resolution = 256
    if render_map:
        mesh_file = os.path.join(get_model_path(self.config["model_id"]), "mesh_z_up.obj")
        self.map_renderer = NavigationMapRenderer(mesh_file, self.default_z, 0.1,
                                                  render_resolution=self.render_resolution)
def __init__(self, config, gpu_count=0, start_locations_file=None, fixed_endpoints=False, render=True):
    HuskyExplorationEnv.__init__(self, config, gpu_count, start_locations_file, fixed_endpoints)
    self.min_depth = 0.0
    self.max_depth = 1.5
    self.fov = self.config["fov"]
    self.screen_dim = self.config["resolution"]
    self.render = render
    if render:
        mesh_file = os.path.join(get_model_path(self.config["model_id"]), "mesh_z_up.obj")
        self.map_renderer = ExplorationMapRenderer(mesh_file, self.default_z, 0.1, self.cell_size,
                                                   render_resolution=self.screen_dim)
def __init__(self, config, gpu_count, scene_type, use_filler=True):
    ## The following properties are already instantiated inside xxx_env.py:
    # @self.timestep
    # @self.frame_skip

    if self.gui:
        #self.screen = pygame.display.set_mode([612, 512], 0, 32)
        self.screen_arr = np.zeros([612, 512, 3])

    self.test_env = "TEST_ENV" in os.environ.keys() and os.environ['TEST_ENV'] == "True"
    #assert (mode in ["GREY", "RGB", "RGBD", "DEPTH", "SENSOR"]), \
    #    "Environment mode must be RGB/RGBD/DEPTH/SENSOR"
    self._use_filler = use_filler
    self._require_camera_input = 'rgb_filled' in self.config["output"]
    self._require_semantics = 'semantics' in self.config["output"]
    self._semantic_source = 1
    self._semantic_color = 1
    if self._require_semantics:
        assert "semantic_source" in self.config.keys(), "semantic_source not specified in configuration"
        assert "semantic_color" in self.config.keys(), "semantic_color not specified in configuration"
        assert self.config["semantic_source"] in [1, 2], "semantic_source not valid"
        assert self.config["semantic_color"] in [1, 2], "semantic_color not valid"
        self._semantic_source = self.config["semantic_source"]
        self._semantic_color = self.config["semantic_color"]
    self._require_normal = 'normal' in self.config["output"]

    if self._require_camera_input:
        self.model_path = get_model_path(self.model_id)

    BaseRobotEnv.__init__(self, config, scene_type, gpu_count)

    # Cube bodies used to mark the robot and target positions on the map
    # (alpha is 0, so they are not visible in the main render).
    cube_id = p.createCollisionShape(p.GEOM_MESH,
                                     fileName=os.path.join(pybullet_data.getDataPath(), 'cube.obj'),
                                     meshScale=[1, 1, 1])
    self.robot_mapId = p.createMultiBody(baseCollisionShapeIndex=cube_id, baseVisualShapeIndex=-1)
    p.changeVisualShape(self.robot_mapId, -1, rgbaColor=[0, 0, 1, 0])

    cube_id = p.createCollisionShape(p.GEOM_MESH,
                                     fileName=os.path.join(pybullet_data.getDataPath(), 'cube.obj'),
                                     meshScale=[1, 1, 1])
    self.target_mapId = p.createMultiBody(baseCollisionShapeIndex=cube_id, baseVisualShapeIndex=-1)
    p.changeVisualShape(self.target_mapId, -1, rgbaColor=[1, 0, 0, 0])

    self.save_frame = 0
    self.fps = 0
def make_env(downstream_task, config=None):
    assert downstream_task in ['navigation', 'exploration', 'local_planning', 'local_planning_multi']
    if config is None:
        config = DEFAULT_CONFIGS[downstream_task]
    start_locations_file = os.path.join(get_model_path('Beechwood'), "first_floor_poses.csv")
    if downstream_task == 'navigation':
        return HuskyVisualNavigateEnv(config=config, gpu_count=0)
    elif downstream_task == 'exploration':
        return HuskyVisualExplorationEnv(config=config, gpu_count=0,
                                         start_locations_file=start_locations_file)
    elif downstream_task == 'local_planning':
        return HuskyCoordinateNavigateEnv(config=config, gpu_count=0,
                                          start_locations_file=start_locations_file)
    elif downstream_task == 'local_planning_multi':
        return HuskyCoordinateNavigateMultiEnv(config=config, gpu_count=0)
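# Usage sketch (an assumption about the calling side, not code from this module):
# the factory above is assumed to return environments that follow the standard
# gym-style reset/step interface, so a short random rollout looks like this.
def _example_rollout(n_steps=10):
    env = make_env('exploration')
    obs = env.reset()
    for _ in range(n_steps):
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            obs = env.reset()
    return obs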
def __init__(self, config, gpu_count=0, render_map=False, fixed_endpoints=False, switch_frequency=None):
    tracemalloc.start()
    self.old_snapshot = None
    self.config = self.parse_config(config)
    CameraRobotEnv.__init__(self, self.config, gpu_count,
                            scene_type='building',
                            tracking_camera=tracking_camera)
    self.fixed_endpoints = fixed_endpoints
    self.target_radius = 0.5
    self.render_map = render_map
    self.render_resolution = 256
    if switch_frequency is not None:
        self.switch_frequency = switch_frequency
    else:
        self.switch_frequency = self.config["switch_frequency"]

    # configure environment
    self.model_selection = self.config["model_selection"]
    assert self.model_selection in ['in_order', 'random']
    self.target_mu = self.config["target_distance_mu"]
    self.target_sigma = self.config["target_distance_sigma"]
    self.model_ids = self.config["model_ids"]
    self.z_coordinates = {}
    for model_id, z in zip(self.model_ids, self.config["z_coordinates"]):
        self.z_coordinates[model_id] = z
    self.all_locations = self.get_valid_locations()
    self.model_index = -1
    if not self.fixed_endpoints:
        self.switch_model(self.model_selection)
    else:
        self.model_id = self.config["model_id"]
        self.default_z = self.config["initial_pos"][2]
    self.start_location = self.select_agent_location()
    self.config["initial_pos"] = [self.start_location[0], self.start_location[1], self.default_z]
    self.target_location = self.select_target()

    # introduce robot and scene
    self.robot_introduce(Husky(self.config, env=self))
    self.scene_introduce()
    self.eps_count = 1

    if render_map:
        mesh_file = os.path.join(get_model_path(self.model_id), "mesh_z_up.obj")
        self.map_renderer = NavigationMapRenderer(mesh_file, self.default_z, 0.1,
                                                  render_resolution=self.render_resolution)
def __init__(self, config, gpu_idx, scene_type, tracking_camera):
    ## The following properties are already instantiated inside xxx_env.py:
    BaseRobotEnv.__init__(self, config, tracking_camera, scene_type, gpu_idx)

    #Deprecated
    #self.penalty = 0

    if self.gui:
        self.screen_arr = np.zeros([512, 512, 3])

    self.test_env = "TEST_ENV" in os.environ.keys() and os.environ['TEST_ENV'] == "True"
    self._use_filler = config["use_filler"]
    self._require_camera_input = 'rgb_filled' in self.config["output"] or \
                                 'rgb_prefilled' in self.config["output"] or \
                                 'depth' in self.config["output"] or \
                                 'normal' in self.config["output"] or \
                                 'semantics' in self.config["output"]
    self._require_rgb = 'rgb_filled' in self.config["output"] or "rgb_prefilled" in self.config["output"]
    self._require_depth = 'depth' in self.config["output"]
    self._require_normal = 'normal' in self.config["output"]
    self._require_semantics = 'semantics' in self.config["output"]
    self._semantic_source = 1
    self._semantic_color = 1
    if self._require_semantics:
        assert "semantic_source" in self.config.keys(), "semantic_source not specified in configuration"
        assert "semantic_color" in self.config.keys(), "semantic_color not specified in configuration"
        assert self.config["semantic_source"] in [1, 2], "semantic_source not valid"
        assert self.config["semantic_color"] in [1, 2, 3], "semantic_color not valid"
        self._semantic_source = self.config["semantic_source"]
        self._semantic_color = self.config["semantic_color"]

    #if self._require_camera_input:
    self.model_path = get_model_path(self.model_id)

    self.save_frame = 0
    self.fps = 0
    self.last_flagId = None
    # Red semi-transparent cube used to visualize the current target flag.
    self.visual_flagId = p.createVisualShape(p.GEOM_MESH,
                                             fileName=os.path.join(pybullet_data.getDataPath(), 'cube.obj'),
                                             meshScale=[0.3, 0.3, 0.3],
                                             rgbaColor=[1, 0, 0, 0.7])
def __init__(self, robot, model_id, gravity, timestep, frame_skip, env=None):
    Scene.__init__(self, gravity, timestep, frame_skip, env)

    # contains cpp_world.clean_everything()
    # stadium_pose = cpp_household.Pose()
    # if self.zero_at_running_strip_start_line:
    #     stadium_pose.set_xyz(27, 21, 0)  # see RUN_STARTLINE, RUN_RAD constants

    filename = os.path.join(get_model_path(model_id), "mesh_z_up.obj")
    #filename = os.path.join(get_model_path(model_id), "3d", "blender.obj")
    #textureID = p.loadTexture(os.path.join(get_model_path(model_id), "3d", "rgb.mtl"))

    if robot.model_type == "MJCF":
        MJCF_SCALING = robot.mjcf_scaling
        scaling = [1.0/MJCF_SCALING, 1.0/MJCF_SCALING, 0.6/MJCF_SCALING]
    else:
        scaling = [1, 1, 1]
    magnified = [2, 2, 2]

    collisionId = p.createCollisionShape(p.GEOM_MESH,
                                         fileName=filename,
                                         meshScale=scaling,
                                         flags=p.GEOM_FORCE_CONCAVE_TRIMESH)
    textureID = -1
    boundaryUid = p.createMultiBody(baseCollisionShapeIndex=collisionId,
                                    baseVisualShapeIndex=textureID)
    p.changeDynamics(boundaryUid, -1, lateralFriction=1)
    #print(p.getDynamicsInfo(boundaryUid, -1))
    self.scene_obj_list = [(boundaryUid, -1)]  # baselink index -1

    planeName = os.path.join(pybullet_data.getDataPath(), "mjcf/ground_plane.xml")
    self.ground_plane_mjcf = p.loadMJCF(planeName)
    if "z_offset" in self.env.config:
        z_offset = self.env.config["z_offset"]
    else:
        z_offset = -0.5
    p.resetBasePositionAndOrientation(self.ground_plane_mjcf[0],
                                      posObj=[0, 0, z_offset],
                                      ornObj=[0, 0, 0, 1])
    p.changeVisualShape(boundaryUid, -1,
                        rgbaColor=[168 / 255.0, 164 / 255.0, 92 / 255.0, 1.0],
                        specularColor=[0.5, 0.5, 0.5])
def _reset(self):
    # Randomly select a scenario
    scenario_index = np.random.randint(self.n_scenarios)
    scenario = self.scenarios[scenario_index]
    print("Selected scenario:", scenario)
    self.model_id = self.config["model_id"] = scenario['sceneId']
    self.model_path = get_model_path(self.model_id)
    self.config["initial_pos"] = [float(scenario['startX']), float(scenario['startY']), float(scenario['startZ'])]
    self.config["target_pos"] = [float(scenario['goalX']), float(scenario['goalY']), float(scenario['goalZ'])]
    self.config["target_orn"] = [0, 0, 0]
    self.config["initial_orn"] = [0, 0, float(scenario['startAngle'])]
    self.kill_depth_render()
    self.setup_rendering_camera()
    self.scene_introduce()
    return super(HuskyMultiSceneEnv, self)._reset()
def get_valid_locations(self):
    # Load the precomputed first-floor pose list for each model id.
    floor_poses_files = [os.path.join(get_model_path(model_id), "first_floor_poses.csv")
                         for model_id in self.model_ids]
    result = {}
    for model_id, floor_poses_file in zip(self.model_ids, floor_poses_files):
        result[model_id] = np.loadtxt(floor_poses_file, delimiter=',')
    return result
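# Hedged sketch of how these per-model pose arrays might be consumed when
# sampling a start location. The column layout of first_floor_poses.csv (x, y
# in the first two columns) and the helper name are assumptions.
def _sample_start_xy(poses, rng=np.random):
    idx = rng.randint(poses.shape[0])
    return poses[idx, 0], poses[idx, 1]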
def cfg_navigation():
    uuid = 'gibson_visualnavigation'
    cfg = {}
    cfg['learner'] = {
        'algo': 'ppo',                     # Learning algorithm for RL agent. Currently only PPO
        'clip_param': 0.1,                 # Clip param for trust region in PPO
        'entropy_coef': 1e-4,              # Weighting of the entropy term in PPO
        'eps': 1e-5,                       # Small epsilon to prevent divide-by-zero
        'gamma': 0.99,                     # Gamma to use if env.observation_space.shape = 1
        'internal_state_size': 512,        # If using a recurrent policy, what state size to use
        'lr': 1e-4,                        # Learning rate for algorithm
        'num_steps': 512,                  # Length of each rollout
        'num_mini_batch': 8,               # Size of PPO minibatch
        'num_stack': 4,                    # Frames that each cell (CNN) can see
        'max_grad_norm': 0.5,              # Clip grads
        'ppo_epoch': 8,                    # Number of times PPO goes over the buffer
        'recurrent_policy': False,         # Use a recurrent version with the cell as the standard model
        'tau': 0.95,                       # When using GAE
        'use_gae': True,                   # Whether to use GAE
        'value_loss_coef': 1e-3,           # Weighting of value_loss in PPO
        'perception_network': 'AtariNet',  # The classname of an architecture to use
        'perception_network_kwargs': {},   # kwargs to pass to `perception_network`
        'test': False,
        'use_replay': True,
        'replay_buffer_size': 10000,
        'on_policy_epoch': 8,
        'off_policy_epoch': 8,
    }
    image_dim = 84
    cfg['env'] = {
        'add_timestep': False,             # Add timestep to the observation
        'env_name': 'Gibson_HuskyVisualNavigateEnv',
        'env_specific_kwargs': {
            'blind': False,
            'blank_sensor': True,
            'gibson_config': '/root/perception_module/evkit/env/gibson/husky_visual_navigate.yaml',
            'start_locations_file': os.path.join(get_model_path('Beechwood'), 'first_floor_poses.csv'),
        },
        'sensors': {
            'rgb_filled': None,
            'features': None,
            'taskonomy': None,
            'map': None,
            'target': None,
        },
        'transform_fn_pre_aggregation': None,
        'transform_fn_post_aggregation': None,
        'num_processes': 1,
        'num_val_processes': 0,
        'repeat_count': 0,
    }
    cfg['saving'] = {
        'checkpoint': None,
        'checkpoint_configs': False,       # copy the metadata of the checkpoint. YMMV.
        'log_dir': LOG_DIR,
        'log_interval': 1,
        'save_interval': 100,
        'save_dir': 'checkpoints',
        'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'),
        'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'),
        'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'),
        'vis_interval': 200,
        'visdom_server': 'localhost',
        'visdom_port': '8097',
    }
    cfg['training'] = {
        'cuda': True,
        'seed': random.randint(0, 1000),
        'num_frames': 5e5,
        'resumable': True,
    }