def __init__(self, action_space, sensor_names=None, camera_size=None,
             camera_hfov=None, model_names=None, app=None, dt=None):
    """Car environment on a straight road; adds a distance-traveled space."""
    super(StraightCarPanda3dEnv, self).__init__(
        action_space,
        sensor_names=sensor_names,
        camera_size=camera_size,
        camera_hfov=camera_hfov,
        model_names=model_names,
        app=app,
        dt=dt)
    # valid range for the distance traveled along the straight segment
    self.dist_space = BoxSpace(0, 275 + 225)
def main():
    """Benchmark stepping and rendering speed of GeometricCarPanda3dEnv."""
    # actions are forward acceleration and lateral velocity
    # NOTE(review): the acceleration bounds are [0.0, 0.0], so sampled
    # actions never accelerate the car -- presumably intentional for the
    # geometric car; confirm if acceleration should be allowed here.
    action_space = BoxSpace(low=np.array([0.0, -1.0]),
                            high=np.array([0.0, 1.0]))
    env = GeometricCarPanda3dEnv(action_space)

    start_time = time.time()
    num_trajs, num_steps = 10, 100
    for _ in range(num_trajs):
        # reset() counts as one extra rendered frame per trajectory,
        # hence (num_steps + 1) in the FPS computation below
        env.reset()
        for _ in range(num_steps):
            env.step(action_space.sample())
            env.render()
    elapsed = time.time() - start_time
    print("average FPS: {}".format(num_trajs * (num_steps + 1) / elapsed))
def __init__(self, action_space, feature_type=None, filter_features=None,
             max_time_steps=100, distance_threshold=4.0, **kwargs):
    """
    Servoing environment that tracks image key points on the target object.

    filter_features indicates whether to filter out key points that are not
    on the object in the current image. Key points in the target image are
    always filtered out.

    Parameters:
        action_space: action space forwarded to SimpleQuadPanda3dEnv.
        feature_type: one of 'sift', 'surf', 'orb'; defaults to 'sift'.
        filter_features: whether to mask out off-object key points in the
            current image; defaults to True when None.
        max_time_steps, distance_threshold: forwarded to ServoingEnv.
        **kwargs: forwarded to SimpleQuadPanda3dEnv.

    Raises:
        ValueError: if feature_type is not one of the supported extractors.
    """
    SimpleQuadPanda3dEnv.__init__(self, action_space, **kwargs)
    ServoingEnv.__init__(self, env=self, max_time_steps=max_time_steps,
                         distance_threshold=distance_threshold)

    # key-point observations are 3-d points bounded in depth by the lens
    lens = self.camera_node.node().getLens()
    self._observation_space.spaces['points'] = BoxSpace(
        np.array([-np.inf, lens.getNear(), -np.inf]),
        np.array([np.inf, lens.getFar(), np.inf]))
    film_size = tuple(int(s) for s in lens.getFilmSize())
    # mask camera renders object-vs-background masks from the same viewpoint
    self.mask_camera_sensor = Panda3dMaskCameraSensor(
        self.app, (self.skybox_node, self.city_node),
        size=film_size,
        near_far=(lens.getNear(), lens.getFar()),
        hfov=lens.getFov())
    for cam in self.mask_camera_sensor.cam:
        cam.reparentTo(self.camera_sensor.cam)

    # BUG FIX: the original was `True if filter_features is None else False`,
    # which ignored (and effectively inverted) any explicit filter_features
    # argument. Default to True, otherwise honor the caller's value.
    self.filter_features = True if filter_features is None else filter_features
    self._feature_type = feature_type or 'sift'
    if cv2.__version__.split('.')[0] == '3':
        # OpenCV 3 moved SIFT/SURF into the xfeatures2d contrib module
        from cv2.xfeatures2d import SIFT_create, SURF_create
        from cv2 import ORB_create
        if self.feature_type == 'orb':
            # https://github.com/opencv/opencv/issues/6081
            cv2.ocl.setUseOpenCL(False)
    else:
        SIFT_create = cv2.SIFT
        SURF_create = cv2.SURF
        ORB_create = cv2.ORB
    if self.feature_type == 'sift':
        self._feature_extractor = SIFT_create()
    elif self.feature_type == 'surf':
        self._feature_extractor = SURF_create()
    elif self.feature_type == 'orb':
        self._feature_extractor = ORB_create()
    else:
        raise ValueError("Unknown feature extractor %s" % self.feature_type)
    # ORB yields binary descriptors, so it requires Hamming distance
    if self.feature_type == 'orb':
        self._matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    else:
        self._matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    # lazily populated when a target image is set
    self._target_key_points = None
    self._target_descriptors = None
def __init__(self, low, high, axis=None, dtype=None):
    """
    high and low are bounds for the translation part and the angle magnitude
    """
    # entries 0-2 bound the translation; entry 3 bounds the angle magnitude
    self.box_space = BoxSpace(low[:3], high[:3], dtype=dtype)
    self.axis_angle_space = AxisAngleSpace(low[3], high[3], axis=axis)
    super(TranslationAxisAngleSpace, self).__init__(
        [self.box_space, self.axis_angle_space])
    self.low = np.append(self.box_space.low, self.axis_angle_space.low)
    self.high = np.append(self.box_space.high, self.axis_angle_space.high)
    # alias the corresponding slices of self.low and self.high
    # (numpy slices are views, so mutating self.low / self.high is
    # reflected in the sub-spaces' bounds)
    self.box_space.low, self.box_space.high = self.low[:3], self.high[:3]
    self.axis_angle_space.low, self.axis_angle_space.high = self.low[3:], self.high[3:]
    assert self.low.shape == (4,)
    assert self.high.shape == (4,)
def __init__(self, *args, **kwargs):
    """Quad env whose observations include 4 bounding-box corner points."""
    super(BboxSimpleQuadPanda3dEnv, self).__init__(*args, **kwargs)
    lens = self.camera_node.node().getLens()
    # one 3-d point per corner, bounded in depth by the lens near/far planes
    low = np.array([[-np.inf, lens.getNear(), -np.inf]] * 4)
    high = np.array([[np.inf, lens.getFar(), np.inf]] * 4)
    self._observation_space.spaces['points'] = BoxSpace(low, high)
    film_size = tuple(int(s) for s in lens.getFilmSize())
    # mask camera shares the main camera's viewpoint and lens parameters
    self.mask_camera_sensor = Panda3dMaskCameraSensor(
        self.app, (self.skybox_node, self.city_node),
        size=film_size,
        near_far=(lens.getNear(), lens.getFar()),
        hfov=lens.getFov())
    for mask_cam in self.mask_camera_sensor.cam:
        mask_cam.reparentTo(self.camera_sensor.cam)
def __init__(self, *args, **kwargs):
    """Quad env whose observations include 8 unbounded 3-d box corners."""
    super(Bbox3dSimpleQuadPanda3dEnv, self).__init__(*args, **kwargs)
    points_space = BoxSpace(-np.inf, np.inf, shape=(8, 3))
    self._observation_space.spaces['points'] = points_space
def __init__(self, action_space, sensor_names=None, camera_size=None,
             camera_hfov=None, offset=None, car_env_class=None,
             car_action_space=None, car_model_names=None, app=None, dt=None):
    """Quad environment that follows a car through the city scene.

    Parameters:
        action_space: quad action space; high[1] is read below as the
            quad's maximum forward velocity.
        sensor_names: sensors to render ('image' and/or 'depth_image');
            None defaults to ['image'], an empty list disables sensors.
        camera_size, camera_hfov: onboard camera resolution and field of view.
        offset: offset to the quad; defaults to a fixed vector of length
            ~15.05 (direction from [0, -1/tan(pi/6), 1]).
        car_env_class: class of the embedded car env (default
            GeometricCarPanda3dEnv).
        car_action_space: car action space (default: all-zero bounds, so
            sampled car actions are always zero).
        car_model_names: car model names (default ['camaro2']).
        app, dt: Panda3d app and time step, forwarded to the base class.

    Raises:
        ValueError: if a sensor name is not 'image' or 'depth_image'.
    """
    super(SimpleQuadPanda3dEnv, self).__init__(app=app, dt=dt)
    self._action_space = action_space
    self._sensor_names = sensor_names if sensor_names is not None else [
        'image'
    ]  # don't override empty list
    self.offset = np.array(offset) if offset is not None \
        else np.array([0, -1 / np.tan(np.pi / 6), 1]) * 15.05  # offset to quad

    # embedded car environment; it renders no sensors of its own and shares
    # this env's app and time step
    self.car_env_class = car_env_class or GeometricCarPanda3dEnv
    self.car_action_space = car_action_space or BoxSpace(
        np.array([0.0, 0.0]), np.array([0.0, 0.0]))
    self.car_model_names = car_model_names or ['camaro2']
    self.car_env = self.car_env_class(self.car_action_space,
                                      sensor_names=[],
                                      model_names=self.car_model_names,
                                      app=self.app,
                                      dt=self.dt)
    self.skybox_node = self.car_env.skybox_node
    self.city_node = self.car_env.city_node
    self.car_node = self.car_env.car_node

    # modify the car's speed limits so that the car's speed is always a
    # quarter of the quad's maximum forward velocity (low == high pins it)
    self.car_env.speed_offset_space.low[0] = self.action_space.high[1] / 4  # meters per second
    self.car_env.speed_offset_space.high[0] = self.action_space.high[1] / 4

    # show the quad model only if render() is called, otherwise keep it hidden
    self._load_quad()
    self.quad_node.setName('quad')
    self.quad_node.hide()
    for quad_prop_local_node in self.quad_prop_local_nodes:
        quad_prop_local_node.hide()
    # propeller animation state (angle in radians, fixed spin rate in rpm)
    self.prop_angle = 0.0
    self.prop_rpm = 10212

    observation_spaces = dict()
    if self.sensor_names:
        # determine which render targets the camera sensor needs
        color = depth = False
        for sensor_name in self.sensor_names:
            if sensor_name == 'image':
                color = True
            elif sensor_name == 'depth_image':
                depth = True
            else:
                raise ValueError('Unknown sensor name %s' % sensor_name)
        self.camera_sensor = Panda3dCameraSensor(self.app,
                                                 color=color,
                                                 depth=depth,
                                                 size=camera_size,
                                                 hfov=camera_hfov)
        self.camera_node = self.camera_sensor.cam
        self.camera_node.reparentTo(self.quad_node)
        self.camera_node.setPos(
            tuple(np.array([0, -1 / np.tan(np.pi / 6), 1]) * -0.05))  # slightly in front of the quad
        # pitch the camera down by 30 degrees (rotation about the x axis)
        self.camera_node.setQuat(
            tuple(tf.quaternion_about_axis(-np.pi / 6, np.array([1, 0, 0]))))
        self.camera_node.setName('quad_camera')
        lens = self.camera_node.node().getLens()
        film_size = tuple(int(s) for s in lens.getFilmSize())
        # observation images are (height, width, channels); depth is bounded
        # by the lens near/far planes
        for sensor_name in self.sensor_names:
            if sensor_name == 'image':
                observation_spaces[sensor_name] = BoxSpace(
                    0, 255, shape=film_size[::-1] + (3, ), dtype=np.uint8)
            elif sensor_name == 'depth_image':
                observation_spaces[sensor_name] = BoxSpace(
                    lens.getNear(), lens.getFar(),
                    shape=film_size[::-1] + (1, ))
    else:
        self.camera_sensor = None
    self._observation_space = DictSpace(observation_spaces)
    self._first_render = True
class StraightCarPanda3dEnv(CarPanda3dEnv):
    """Car environment restricted to a straight road segment of the city."""

    def __init__(self, action_space, sensor_names=None, camera_size=None,
                 camera_hfov=None, model_names=None, app=None, dt=None):
        super(StraightCarPanda3dEnv, self).__init__(
            action_space,
            sensor_names=sensor_names,
            camera_size=camera_size,
            camera_hfov=camera_hfov,
            model_names=model_names,
            app=app,
            dt=dt)
        # valid range for the distance traveled along the straight segment
        self.dist_space = BoxSpace(0, 275 + 225)
        # minimum and maximum position of the car
        # [-51 - 6, -275, 10.7]
        # [-51 + 6, 225, 10.7]

    @property
    def straight_dist(self):
        # distance traveled along the road; the setter clips to dist_space
        return self._straight_dist

    @straight_dist.setter
    def straight_dist(self, straight_dist):
        self._straight_dist = np.clip(straight_dist, self.dist_space.low,
                                      self.dist_space.high)

    @property
    def position(self):
        # world position derived from lane offset (x) and distance along
        # the road (y); z is the fixed road height
        return np.array(
            [-51 + self.lane_offset, -275 + self.straight_dist, 10.7])

    def step(self, action):
        # action is (forward acceleration, lateral velocity); Euler-integrate
        # speed, lane offset and road distance over one time step
        forward_acceleration, lateral_velocity = action
        self.speed += forward_acceleration * self.dt
        self.lane_offset += lateral_velocity * self.dt
        self.straight_dist += self.speed * self.dt
        self.car_node.setPos(tuple(self.position))
        # no reward (None) and never done (False) in this environment
        return self.observe(), None, False, dict()

    def reset(self, state=None):
        # sample a random (speed, lane_offset, straight_dist, model) state
        # unless an explicit one is given
        self._first_render = True
        if state is None:
            speed, lane_offset = self.speed_offset_space.sample()
            straight_dist = self.dist_space.sample()
            model_name_ind = np.random.randint(0, len(self.model_names))
            state = speed, lane_offset, straight_dist, model_name_ind
        self.set_state(state)
        return self.observe()

    def get_state(self):
        # state vector: [speed, lane_offset, straight_dist, model index]
        model_name_ind = self.model_names.index(self.model_name)
        return np.array(
            [self.speed, self.lane_offset, self.straight_dist, model_name_ind])

    def set_state(self, state):
        speed, lane_offset, straight_dist, model_name_ind = state
        # the index may arrive as a float (e.g. from get_state's ndarray)
        model_name_ind = int(model_name_ind)
        self.speed, self.lane_offset, self.straight_dist = speed, lane_offset, straight_dist
        self.model_name = self.model_names[model_name_ind]
        self.car_node.setPos(tuple(self.position))
def __init__(self, action_space, sensor_names=None, camera_size=None,
             camera_hfov=None, model_names=None, app=None, dt=None):
    """Base car environment: loads the city scene, the car model, and an
    optional onboard camera sensor.

    Parameters:
        action_space: car action space.
        sensor_names: sensors to render ('image' and/or 'depth_image');
            None defaults to ['image'], an empty list disables sensors.
        camera_size, camera_hfov: camera resolution and field of view.
        model_names: tuple/list of car model names (default ['camaro2']).
        app, dt: Panda3d app and time step, forwarded to the base class.

    Raises:
        ValueError: if model_names is not a tuple/list, or a sensor name is
            not 'image' or 'depth_image'.
    """
    super(CarPanda3dEnv, self).__init__(app=app, dt=dt)
    self._action_space = action_space
    self._sensor_names = sensor_names if sensor_names is not None else [
        'image'
    ]  # don't override empty list
    self.model_names = model_names or ['camaro2']
    if not isinstance(self.model_names, (tuple, list)):
        raise ValueError(
            'model_names should be a tuple or a list, but %r was given.' %
            model_names)

    # state
    self._speed = 1.0
    self._lane_offset = 2.0
    self._straight_dist = 0.0
    self._model_name = None

    # road properties
    self._lane_width = 4.0
    self._num_lanes = 2
    # bounds for (speed, lane offset): speed in [0, 10]; lane offset spans
    # from the middle of the first lane to the middle of the last lane
    self.speed_offset_space = BoxSpace(
        [0.0, 0.5 * self._lane_width],
        [10.0, (self._num_lanes - 0.5) * self._lane_width])

    self._load_city()
    self.car_node = self.app.render.attachNewNode('car')
    self._car_local_node = None
    self._car_local_nodes = dict()  # cache of loaded car models by name
    self.model_name = self.model_names[0]  # first car by default

    observation_spaces = dict()
    if self.sensor_names:
        # determine which render targets the camera sensor needs
        color = depth = False
        for sensor_name in self.sensor_names:
            if sensor_name == 'image':
                color = True
            elif sensor_name == 'depth_image':
                depth = True
            else:
                raise ValueError('Unknown sensor name %s' % sensor_name)
        self.camera_sensor = Panda3dCameraSensor(self.app,
                                                 color=color,
                                                 depth=depth,
                                                 size=camera_size,
                                                 hfov=camera_hfov)
        self.camera_node = self.camera_sensor.cam
        self.camera_node.reparentTo(self.car_node)
        self.camera_node.setPos(tuple(np.array(
            [0, 1, 2])))  # slightly in front of the car
        self.camera_node.setName('car_camera')
        lens = self.camera_node.node().getLens()
        film_size = tuple(int(s) for s in lens.getFilmSize())
        # observation images are (height, width, channels); depth is bounded
        # by the lens near/far planes
        for sensor_name in self.sensor_names:
            if sensor_name == 'image':
                observation_spaces[sensor_name] = BoxSpace(
                    0, 255, shape=film_size[::-1] + (3, ), dtype=np.uint8)
            elif sensor_name == 'depth_image':
                observation_spaces[sensor_name] = BoxSpace(
                    lens.getNear(), lens.getFar(),
                    shape=film_size[::-1] + (1, ))
    else:
        self.camera_sensor = None
    self._observation_space = DictSpace(observation_spaces)
    self._first_render = True
def main():
    """Interactive demo: drive the car with the arrow keys while a quad
    follows; 'c' cycles camera modes, 'r' resets, 's' takes a screenshot."""
    # actions are forward acceleration and lateral velocity
    action_space = BoxSpace(low=np.array([-1.0, -1.0]),
                            high=np.array([1.0, 1.0]))
    car_model_names = [
        'camaro2', 'kia_rio_blue', 'kia_rio_red', 'kia_rio_silver',
        'kia_rio_white', 'kia_rio_yellow', 'mazda6', 'mitsubishi_lancer_evo',
        'sport'
    ]
    env = CustomSimpleQuadPanda3dEnv(
        action_space,
        sensor_names=[],  # empty sensor_names means no observations
        car_model_names=car_model_names)

    num_camera_modes = 3
    # current key states plus the active camera mode, shared with step()
    key_map = dict(left=False,
                   right=False,
                   up=False,
                   down=False,
                   camera_pressed=False,
                   camera_mode=0)

    def step(task):
        # translate arrow-key state into an action and advance the env
        forward_acceleration = as_one(key_map['up']) - as_one(key_map['down'])
        lateral_velocity = as_one(key_map['right']) - as_one(key_map['left'])
        action = np.array([forward_acceleration, lateral_velocity])
        env.step(action)

        # cycle the camera mode when 'c' was released since the last frame
        if key_map['camera_pressed']:
            key_map['camera_mode'] = (key_map['camera_mode'] +
                                      1) % num_camera_modes
        if key_map['camera_mode'] == 0:
            # mode 0: camera placed 100 units above the car, pitched down
            env.app.cam.reparentTo(env.app.render)
            env.app.cam.setQuat(
                tuple(
                    tf.quaternion_about_axis(-np.pi / 2, np.array([1, 0,
                                                                   0]))))
            env.app.cam.setPos(
                tuple(
                    np.array(env.car_node.getPos()) +
                    np.array([0., 0., 100.])))
        elif key_map['camera_mode'] in (1, 2):
            # modes 1 and 2: chase camera behind the car or the quad; snap
            # immediately on a mode change, otherwise follow smoothly
            if key_map['camera_pressed']:
                tightness = 1.0
            else:
                tightness = 0.1
            if key_map['camera_mode'] == 1:
                target_node = env.car_node
                offset = np.array([0., -4., 3.]) * 3
            else:
                target_node = env.quad_node
                offset = np.array([0., -4., 3.]) * .5
            # desired camera position: offset expressed in the target's frame
            target_T = tf.pose_matrix(target_node.getQuat(),
                                      target_node.getPos())
            target_camera_pos = target_T[:3, 3] + target_T[:3, :3].dot(offset)
            # exponential smoothing toward the desired position
            env.app.cam.setPos(
                tuple((1 - tightness) * np.array(env.app.cam.getPos()) +
                      tightness * target_camera_pos))
            env.app.cam.lookAt(target_node)
        else:
            # NOTE(review): with num_camera_modes == 3 the mode is always
            # 0, 1 or 2, so this first-person branch appears unreachable --
            # confirm whether num_camera_modes should be 4.
            env.app.cam.reparentTo(env.car_node)
            env.app.cam.setQuat((1, 0, 0, 0))
            env.app.cam.setPos(tuple(np.array(
                [0, 1, 2])))  # slightly in front of the car
        key_map['camera_pressed'] = False
        return Task.cont

    env.app.taskMgr.add(step, "step")

    add_instructions(env.app, 0.06, "[ESC]: Quit")
    add_instructions(env.app, 0.12, "[R]: Reset environment")
    add_instructions(env.app, 0.18, "[Left Arrow]: Move car left")
    add_instructions(env.app, 0.24, "[Right Arrow]: Move car right")
    add_instructions(env.app, 0.30, "[Up Arrow]: Accelerate the car")
    add_instructions(env.app, 0.36, "[Down Arrow]: Decelerate the car")
    add_instructions(env.app, 0.42, "[C]: Toggle camera mode")
    add_instructions(env.app, 0.48, "[S]: Take screenshot")

    # key bindings: press/release events toggle the key_map entries
    env.app.accept('r', env.reset)
    env.app.accept('arrow_left', key_map.update, [[('left', True)]])
    env.app.accept('arrow_left-up', key_map.update, [[('left', False)]])
    env.app.accept('arrow_right', key_map.update, [[('right', True)]])
    env.app.accept('arrow_right-up', key_map.update, [[('right', False)]])
    env.app.accept('arrow_up', key_map.update, [[('up', True)]])
    env.app.accept('arrow_up-up', key_map.update, [[('up', False)]])
    env.app.accept('arrow_down', key_map.update, [[('down', True)]])
    env.app.accept('arrow_down-up', key_map.update, [[('down', False)]])
    env.app.accept('c-up', key_map.update, [[('camera_pressed', True)]])
    env.app.accept('s-up', env.app.screenshot)

    env.reset()
    env.app.run()