def step(self, action):
    """Advance the simulation one tick.

    Spins the rendered propellers, forwards ``action`` to the car
    environment, then eases the quad toward a fixed chase pose behind and
    above the car (rotation flattened so the quad's z-axis stays up).

    Returns:
        tuple: ``(observation, None, False, {})`` — reward/done are unused.
    """
    # Animate the propellers (purely cosmetic).
    if self.prop_rpm:
        self.prop_angle += (self.prop_rpm * 2 * np.pi / 60) * self.dt
        # Wrap the accumulated angle into [0, 2*pi).
        self.prop_angle -= 2 * np.pi * np.floor(self.prop_angle / (2 * np.pi))
        for prop_id, prop_node in enumerate(self.quad_prop_local_nodes):
            # Propellers 1 and 2 spin counter-clockwise, the others clockwise.
            spin = self.prop_angle if prop_id in (1, 2) else -self.prop_angle
            prop_node.setQuat(
                tuple(tf.quaternion_about_axis(spin, np.array([0, 0, 1]))))

    self.car_env.step(action)

    # Desired quad position: a fixed offset behind/above the car, expressed
    # in the car's body frame.
    car_T = tf.pose_matrix(self.car_node.getQuat(), self.car_node.getPos())
    quad_pos = car_T[:3, 3] + car_T[:3, :3].dot(np.array([0., -4., 3.]) * 4)

    # Desired quad rotation: the car's rotation projected so its z-axis
    # aligns with world-up.
    up = np.array([0, 0, 1])
    axis = np.cross(car_T[:3, 2], up)
    angle = tf.angle_between_vectors(car_T[:3, 2], up)
    if np.isclose(angle, 0.0):
        project_T = np.eye(4)
    else:
        project_T = tf.rotation_matrix(angle, axis)
    quad_T = project_T.dot(car_T)
    quad_quat = tf.quaternion_from_matrix(quad_T[:3, :3])

    # Low-pass filter the position so the quad trails the car smoothly;
    # the rotation is snapped directly to the target.
    tightness = 0.1
    blended_pos = ((1 - tightness) * np.array(self.quad_node.getPos())
                   + tightness * quad_pos)
    self.quad_node.setPosQuat(tuple(blended_pos), tuple(quad_quat))
    return self.observe(), None, False, dict()
def render(self):
    """Render one frame.

    Reveals the quad on first use, eases the main camera toward a chase
    position behind the quad, then renders the observation window(s)
    followed by the main window(s).
    """
    # Make sure the quad and its propellers are visible.
    if self.quad_node.isHidden():
        self.quad_node.show()
        for prop_node in self.quad_prop_local_nodes:
            prop_node.show()

    # Snap the camera into place on the very first render; ease afterwards.
    if self._first_render:
        tightness = 1.0
        self._first_render = False
    else:
        tightness = 0.1

    target_node = self.quad_node
    target_T = tf.pose_matrix(target_node.getQuat(), target_node.getPos())
    # Chase offset in the target's frame: behind and above, at a distance
    # chosen via the 30-degree elevation (1/tan(pi/6)) scaled by 15.
    offset = np.array([0., -1 / np.tan(np.pi / 6), 1]) * 15
    desired_cam_pos = target_T[:3, 3] + target_T[:3, :3].dot(offset)
    blended = ((1 - tightness) * np.array(self.app.cam.getPos())
               + tightness * desired_cam_pos)
    self.app.cam.setPos(tuple(blended))
    self.app.cam.lookAt(target_node)

    # Render the observation window(s), then the main window(s).
    obs_engine = self.camera_sensor.graphics_engine
    for _ in range(obs_engine.getNumWindows()):
        obs_engine.renderFrame()
    obs_engine.syncFrame()
    main_engine = self.app.graphicsEngine
    for _ in range(main_engine.getNumWindows()):
        main_engine.renderFrame()
    main_engine.syncFrame()
def hor_car_T(self):
    """Return the car's pose with its rotation "flattened" so that the
    z-axis is world-up (a horizontal car frame).

    Returns:
        np.ndarray: 4x4 homogeneous transform.
    """
    T = tf.pose_matrix(self.car_node.getQuat(), self.car_node.getPos())
    z_axis = np.array([0, 0, 1])
    # x-axis: car forward (its y column) crossed with world-up;
    # y-axis completes the right-handed frame.
    # NOTE(review): these axes are not explicitly normalized — this assumes
    # the car's y-axis is unit length and not vertical; confirm upstream.
    x_axis = np.cross(T[:3, 1], z_axis)
    y_axis = np.cross(z_axis, x_axis)
    T[:3, :3] = np.array([x_axis, y_axis, z_axis]).T
    return T
def step(self, action):
    """Advance one tick with a quad velocity command.

    The car takes a random action from its own action space. ``action`` is
    a twist (3 linear + angular velocity components) that is clipped into
    the action space, integrated over ``self.dt`` to move the quad, and
    then written back in place so it reflects the motion actually applied.

    Returns:
        tuple: ``(observation, None, False, {})`` — reward/done are unused.
    """
    # Animate the propellers (purely cosmetic).
    if self.prop_rpm:
        self.prop_angle += (self.prop_rpm * 2 * np.pi / 60) * self.dt
        # Wrap the accumulated angle into [0, 2*pi).
        self.prop_angle -= 2 * np.pi * np.floor(self.prop_angle / (2 * np.pi))
        for prop_id, prop_node in enumerate(self.quad_prop_local_nodes):
            # Propellers 1 and 2 spin counter-clockwise, the others clockwise.
            spin = self.prop_angle if prop_id in (1, 2) else -self.prop_angle
            prop_node.setQuat(
                tuple(tf.quaternion_about_axis(spin, np.array([0, 0, 1]))))

    # The car drives itself with a randomly sampled action.
    car_action = self.car_env.action_space.sample()
    self.car_env.step(car_action)

    # Clip the commanded action into the action space (in place).
    if not self.action_space.contains(action):
        action = self.action_space.clip(action, out=action)
    linear_vel, angular_vel = np.split(action, [3])
    # A scalar angular speed is interpreted about the space's fixed axis.
    if angular_vel.shape == (1, ):
        angular_vel = angular_vel * self._action_space.axis

    # Integrate the twist over one time step to get the next pose.
    quad_T = tf.pose_matrix(self.quad_node.getQuat(), self.quad_node.getPos())
    delta_T = tf.position_axis_angle_matrix(
        np.append(linear_vel, angular_vel) * self.dt)
    next_quad_T = quad_T.dot(delta_T)

    # Apply the new pose.
    self.quad_node.setPosQuat(
        tuple(next_quad_T[:3, 3]),
        tuple(tf.quaternion_from_matrix(next_quad_T[:3, :3])))

    # Recover the velocities actually applied (post-clipping) and write
    # them back so callers see the effective action.
    delta_T = tf.inverse_matrix(quad_T).dot(next_quad_T)
    linear_vel, angular_vel = np.split(
        tf.position_axis_angle_from_matrix(delta_T) / self.dt, [3])
    # Project the angular velocity back onto the constrained axis, if any.
    if self._action_space.axis is not None:
        angular_vel = angular_vel.dot(self._action_space.axis)
    action[:] = np.append(linear_vel, angular_vel)
    return self.observe(), None, False, dict()
def step(task):
    """Per-frame task: turn keyboard state into a car action and position
    the camera according to the current camera mode.

    Modes: 0 = overhead, 1 = chase car, 2 = chase quad, else = fixed
    first-person view attached to the car. Returns ``Task.cont`` so the
    task keeps running.
    """
    accel = as_one(key_map['up']) - as_one(key_map['down'])
    lateral = as_one(key_map['right']) - as_one(key_map['left'])
    env.step(np.array([accel, lateral]))

    # Cycle the camera mode when the camera key was pressed this frame.
    if key_map['camera_pressed']:
        key_map['camera_mode'] = (key_map['camera_mode'] + 1) % num_camera_modes

    mode = key_map['camera_mode']
    if mode == 0:
        # Overhead view: look straight down from high above the car.
        env.app.cam.reparentTo(env.app.render)
        env.app.cam.setQuat(
            tuple(tf.quaternion_about_axis(-np.pi / 2, np.array([1, 0, 0]))))
        env.app.cam.setPos(
            tuple(np.array(env.car_node.getPos()) + np.array([0., 0., 100.])))
    elif mode in (1, 2):
        # Chase view: snap on the frame the mode changes, ease afterwards.
        tightness = 1.0 if key_map['camera_pressed'] else 0.1
        if mode == 1:
            target_node = env.car_node
            offset = np.array([0., -4., 3.]) * 3
        else:
            target_node = env.quad_node
            offset = np.array([0., -4., 3.]) * .5
        target_T = tf.pose_matrix(target_node.getQuat(), target_node.getPos())
        desired_pos = target_T[:3, 3] + target_T[:3, :3].dot(offset)
        blended = ((1 - tightness) * np.array(env.app.cam.getPos())
                   + tightness * desired_pos)
        env.app.cam.setPos(tuple(blended))
        env.app.cam.lookAt(target_node)
    else:
        # Rigidly attach the camera to the car body.
        env.app.cam.reparentTo(env.car_node)
        env.app.cam.setQuat((1, 0, 0, 0))
        env.app.cam.setPos(tuple(np.array(
            [0, 1, 2])))  # slightly in front of the car
    key_map['camera_pressed'] = False
    return Task.cont
def render(self):
    """Render one frame.

    Eases the main camera toward a chase position behind the car, then
    renders the observation window(s) followed by the main window(s).
    """
    # Snap the camera into place on the very first render; ease afterwards.
    if self._first_render:
        tightness = 1.0
        self._first_render = False
    else:
        tightness = 0.1

    target_node = self.car_node
    target_T = tf.pose_matrix(target_node.getQuat(), target_node.getPos())
    # Chase offset in the car's frame: behind and above, scaled by 4.
    desired_cam_pos = target_T[:3, 3] + target_T[:3, :3].dot(
        np.array([0., -4., 3.]) * 4)
    blended = ((1 - tightness) * np.array(self.app.cam.getPos())
               + tightness * desired_cam_pos)
    self.app.cam.setPos(tuple(blended))
    self.app.cam.lookAt(target_node)

    # Render the observation window(s), then the main window(s).
    obs_engine = self.camera_sensor.graphics_engine
    for _ in range(obs_engine.getNumWindows()):
        obs_engine.renderFrame()
    obs_engine.syncFrame()
    main_engine = self.app.graphicsEngine
    for _ in range(main_engine.getNumWindows()):
        main_engine.renderFrame()
    main_engine.syncFrame()
def get_state(self):
    """Return the combined state vector: the quad's pose encoded as
    position + axis-angle, concatenated with the car environment's state.
    """
    quad_pose_T = tf.pose_matrix(self.quad_node.getQuat(),
                                 self.quad_node.getPos())
    quad_state = tf.position_axis_angle_from_matrix(quad_pose_T)
    return np.concatenate([quad_state, self.car_env.get_state()])