def step(self, action):
    """Advance the simulation one timestep.

    `action` is forwarded unchanged to the wrapped car environment; the quad
    itself is not controlled by `action` here — it is re-posed each step to
    trail behind the car. Returns a gym-style 4-tuple
    (observation, reward, done, info) where reward is always None and done is
    always False.
    """
    # update the angle of the propellers (for rendering purposes)
    if self.prop_rpm:
        # rpm -> radians advanced over self.dt, then wrapped into [0, 2*pi)
        self.prop_angle += (self.prop_rpm * 2 * np.pi / 60) * self.dt
        self.prop_angle -= 2 * np.pi * np.floor(self.prop_angle / (2 * np.pi))
    for quad_prop_id, quad_prop_local_node in enumerate(
            self.quad_prop_local_nodes):
        # propellers 1 and 2 spin counter-clockwise, the others clockwise
        is_ccw = quad_prop_id in (1, 2)
        angle = self.prop_angle if is_ccw else -self.prop_angle
        quad_prop_local_node.setQuat(
            tuple(tf.quaternion_about_axis(angle, np.array([0, 0, 1]))))
    self.car_env.step(action)
    # set the position of the quad to be behind the car
    car_T = tf.pose_matrix(self.car_node.getQuat(), self.car_node.getPos())
    # target position: fixed offset expressed in the car's body frame
    quad_pos = car_T[:3, 3] + car_T[:3, :3].dot(
        np.array([0., -4., 3.]) * 4)
    # set the rotation of the quad to be the rotation of the car projected so that the z-axis is up
    axis = np.cross(car_T[:3, 2], np.array([0, 0, 1]))
    angle = tf.angle_between_vectors(car_T[:3, 2], np.array([0, 0, 1]))
    if np.isclose(angle, 0.0):
        # car's z-axis already points up: the cross product above is (near)
        # zero, so skip the degenerate rotation and use the identity
        project_T = np.eye(4)
    else:
        project_T = tf.rotation_matrix(angle, axis)
    quad_T = project_T.dot(car_T)
    quad_quat = tf.quaternion_from_matrix(quad_T[:3, :3])
    # low-pass filter the position so the quad trails the car smoothly;
    # tightness==1 would snap straight to the target position
    tightness = 0.1
    self.quad_node.setPosQuat(
        tuple((1 - tightness) * np.array(self.quad_node.getPos()) +
              tightness * quad_pos),
        tuple(quad_quat))
    return self.observe(), None, False, dict()
def render(self):
    """Render one frame from a fixed overhead viewpoint.

    Points the default camera straight down from high above the origin and
    pumps the graphics engine once per open window.
    """
    camera = self.app.cam
    # look straight down: rotate -90 degrees about the x-axis
    down_quat = tf.quaternion_about_axis(-np.pi / 2, np.array([1, 0, 0]))
    camera.setQuat(tuple(down_quat))
    camera.setPos((0, 0, 2000))
    engine = self.app.graphicsEngine
    for _ in range(engine.getNumWindows()):
        engine.renderFrame()
    engine.syncFrame()
def step(self, action):
    """Advance the quad one timestep using a velocity command.

    `action` is split as [linear_vel (3,), angular_vel (3,) or (1,)]; a
    scalar angular component is expanded along the action space's fixed
    axis. NOTE(review): `action` is clipped and rewritten IN PLACE so the
    caller sees the action that was actually applied after clipping —
    callers must pass a writable array. Returns a gym-style 4-tuple with
    reward None and done always False.
    """
    # update the angle of the propellers (for rendering purposes)
    if self.prop_rpm:
        # rpm -> radians advanced over self.dt, then wrapped into [0, 2*pi)
        self.prop_angle += (self.prop_rpm * 2 * np.pi / 60) * self.dt
        self.prop_angle -= 2 * np.pi * np.floor(self.prop_angle / (2 * np.pi))
    for quad_prop_id, quad_prop_local_node in enumerate(
            self.quad_prop_local_nodes):
        # propellers 1 and 2 spin counter-clockwise, the others clockwise
        is_ccw = quad_prop_id in (1, 2)
        angle = self.prop_angle if is_ccw else -self.prop_angle
        quad_prop_local_node.setQuat(
            tuple(tf.quaternion_about_axis(angle, np.array([0, 0, 1]))))
    # the car drives itself with an action sampled from its own action space
    car_action = self.car_env.action_space.sample()
    self.car_env.step(car_action)
    # update action to be within the action space
    if not self.action_space.contains(action):
        action = self.action_space.clip(action, out=action)
    linear_vel, angular_vel = np.split(action, [3])
    if angular_vel.shape == (1, ):
        # scalar angular speed: expand it along the space's fixed axis
        angular_vel = angular_vel * self._action_space.axis
    # compute next state
    quad_T = tf.pose_matrix(self.quad_node.getQuat(), self.quad_node.getPos())
    # twist (in the body frame) integrated over dt -> relative transform
    quad_to_next_quad_T = tf.position_axis_angle_matrix(
        np.append(linear_vel, angular_vel) * self.dt)
    next_quad_T = quad_T.dot(quad_to_next_quad_T)
    # set new state
    self.quad_node.setPosQuat(
        tuple(next_quad_T[:3, 3]),
        tuple(tf.quaternion_from_matrix(next_quad_T[:3, :3])))
    # update action to be consistent with the state clippings
    quad_to_next_quad_T = tf.inverse_matrix(quad_T).dot(next_quad_T)
    linear_vel, angular_vel = np.split(
        tf.position_axis_angle_from_matrix(quad_to_next_quad_T) / self.dt,
        [3])
    # project angular_vel onto the axis
    if self._action_space.axis is not None:
        angular_vel = angular_vel.dot(self._action_space.axis)
    action[:] = np.append(linear_vel, angular_vel)
    return self.observe(), None, False, dict()
def step(task):
    """Panda3D task callback: read keyboard state, step the env, update camera.

    Camera modes cycle on each camera key press: 0 = fixed overhead view,
    1 = chase camera on the car, 2 = chase camera on the quad,
    otherwise = camera rigidly attached to the car.
    """
    accel = as_one(key_map['up']) - as_one(key_map['down'])
    steer = as_one(key_map['right']) - as_one(key_map['left'])
    env.step(np.array([accel, steer]))

    just_pressed = key_map['camera_pressed']
    if just_pressed:
        key_map['camera_mode'] = (key_map['camera_mode'] + 1) % num_camera_modes
    mode = key_map['camera_mode']

    if mode == 0:
        # overhead view: world-parented camera high above the car, looking down
        env.app.cam.reparentTo(env.app.render)
        env.app.cam.setQuat(
            tuple(tf.quaternion_about_axis(-np.pi / 2, np.array([1, 0, 0]))))
        env.app.cam.setPos(
            tuple(np.array(env.car_node.getPos()) + np.array([0., 0., 100.])))
    elif mode in (1, 2):
        # chase camera: snap to the target right after a mode switch,
        # otherwise smoothly interpolate toward it
        tightness = 1.0 if just_pressed else 0.1
        if mode == 1:
            target_node = env.car_node
            offset = np.array([0., -4., 3.]) * 3
        else:
            target_node = env.quad_node
            offset = np.array([0., -4., 3.]) * .5
        target_T = tf.pose_matrix(target_node.getQuat(), target_node.getPos())
        goal_pos = target_T[:3, 3] + target_T[:3, :3].dot(offset)
        blended = ((1 - tightness) * np.array(env.app.cam.getPos()) +
                   tightness * goal_pos)
        env.app.cam.setPos(tuple(blended))
        env.app.cam.lookAt(target_node)
    else:
        # camera rigidly mounted on the car
        env.app.cam.reparentTo(env.car_node)
        env.app.cam.setQuat((1, 0, 0, 0))
        env.app.cam.setPos(
            tuple(np.array([0, 1, 2])))  # slightly in front of the car
    key_map['camera_pressed'] = False
    return Task.cont
def __init__(self, action_space, sensor_names=None, camera_size=None,
             camera_hfov=None, offset=None, car_env_class=None,
             car_action_space=None, car_model_names=None, app=None, dt=None):
    """Quad environment that observes a car driving through the city scene.

    Args:
        action_space: space of quad velocity commands; high[1] is read below
            as the quad's maximum forward speed — TODO confirm against the
            space's layout.
        sensor_names: subset of {'image', 'depth_image'}; defaults to
            ['image']. Pass [] explicitly to disable the camera sensor.
        camera_size, camera_hfov: forwarded to Panda3dCameraSensor.
        offset: camera-to-quad offset (3,); a default is computed if None.
        car_env_class: class of the wrapped car env (defaults to
            GeometricCarPanda3dEnv).
        car_action_space: action space of the car env; defaults to a
            degenerate all-zeros box so the car takes no-op actions.
        car_model_names: car model(s) to load; defaults to ['camaro2'].
        app, dt: forwarded to the base Panda3D environment.

    Raises:
        ValueError: if sensor_names contains an unknown sensor name.
    """
    super(SimpleQuadPanda3dEnv, self).__init__(app=app, dt=dt)
    self._action_space = action_space
    self._sensor_names = sensor_names if sensor_names is not None else [
        'image'
    ]  # don't override empty list
    self.offset = np.array(offset) if offset is not None \
        else np.array([0, -1 / np.tan(np.pi / 6), 1]) * 15.05  # offset to quad
    self.car_env_class = car_env_class or GeometricCarPanda3dEnv
    # default car action space is a single point at zero, i.e. the car
    # receives only no-op commands
    self.car_action_space = car_action_space or BoxSpace(
        np.array([0.0, 0.0]), np.array([0.0, 0.0]))
    self.car_model_names = car_model_names or ['camaro2']
    self.car_env = self.car_env_class(self.car_action_space,
                                      sensor_names=[],
                                      model_names=self.car_model_names,
                                      app=self.app,
                                      dt=self.dt)
    # expose the car env's scene nodes on this env for convenience
    self.skybox_node = self.car_env.skybox_node
    self.city_node = self.car_env.city_node
    self.car_node = self.car_env.car_node
    # modify the car's speed limits so that the car's speed is always a quarter of the quad's maximum forward velocity
    # (low == high pins the speed offset to a single value)
    self.car_env.speed_offset_space.low[
        0] = self.action_space.high[1] / 4  # meters per second
    self.car_env.speed_offset_space.high[0] = self.action_space.high[1] / 4
    # show the quad model only if render() is called, otherwise keep it hidden
    self._load_quad()
    self.quad_node.setName('quad')
    self.quad_node.hide()
    for quad_prop_local_node in self.quad_prop_local_nodes:
        quad_prop_local_node.hide()
    # propeller animation state used by step() (rendering only)
    self.prop_angle = 0.0
    self.prop_rpm = 10212
    observation_spaces = dict()
    if self.sensor_names:
        # first pass: validate sensor names and decide which channels the
        # camera sensor needs
        color = depth = False
        for sensor_name in self.sensor_names:
            if sensor_name == 'image':
                color = True
            elif sensor_name == 'depth_image':
                depth = True
            else:
                raise ValueError('Unknown sensor name %s' % sensor_name)
        self.camera_sensor = Panda3dCameraSensor(self.app,
                                                 color=color,
                                                 depth=depth,
                                                 size=camera_size,
                                                 hfov=camera_hfov)
        self.camera_node = self.camera_sensor.cam
        self.camera_node.reparentTo(self.quad_node)
        self.camera_node.setPos(
            tuple(np.array([0, -1 / np.tan(np.pi / 6), 1]) *
                  -0.05))  # slightly in front of the quad
        # tilt the camera down by 30 degrees about the x-axis
        self.camera_node.setQuat(
            tuple(tf.quaternion_about_axis(-np.pi / 6, np.array([1, 0, 0]))))
        self.camera_node.setName('quad_camera')
        lens = self.camera_node.node().getLens()
        film_size = tuple(int(s) for s in lens.getFilmSize())
        # second pass: build the observation space for each requested sensor;
        # image shapes are (height, width, channels), hence film_size[::-1]
        for sensor_name in self.sensor_names:
            if sensor_name == 'image':
                observation_spaces[sensor_name] = BoxSpace(
                    0, 255, shape=film_size[::-1] + (3, ), dtype=np.uint8)
            elif sensor_name == 'depth_image':
                observation_spaces[sensor_name] = BoxSpace(
                    lens.getNear(), lens.getFar(),
                    shape=film_size[::-1] + (1, ))
    else:
        self.camera_sensor = None
    self._observation_space = DictSpace(observation_spaces)
    self._first_render = True