import numpy as np
from scipy.integrate import odeint
from scipy.stats import norm

# Framework imports: these classes follow MushroomRL's Environment interface;
# the module paths below assume MushroomRL's layout.
from mushroom_rl.core import Environment, MDPInfo
from mushroom_rl.utils import spaces
from mushroom_rl.utils.spaces import Box, Discrete
from mushroom_rl.utils.angles import normalize_angle
from mushroom_rl.utils.viewer import Viewer


class ShipSteering(Environment):
    """
    The Ship Steering environment as presented in:
    "Hierarchical Policy Gradient Algorithms". Ghavamzadeh M. and
    Mahadevan S.. 2003.

    """
    def __init__(self, small=True, n_steps_action=3):
        """
        Constructor.

        Args:
            small (bool, True): whether to use a small state space or not;
            n_steps_action (int, 3): number of integration intervals for
                each step of the MDP.

        """
        # MDP parameters
        self.field_size = 150 if small else 1000
        low = np.array([0, 0, -np.pi, -np.pi / 12.])
        high = np.array([self.field_size, self.field_size, np.pi,
                         np.pi / 12.])
        self.omega_max = np.array([np.pi / 12.])
        self._v = 3.
        self._T = 5.
        self._dt = .2
        self._gate_s = np.empty(2)
        self._gate_e = np.empty(2)
        self._gate_s[0] = 100 if small else 350
        self._gate_s[1] = 120 if small else 400
        self._gate_e[0] = 120 if small else 450
        self._gate_e[1] = 100 if small else 400
        self._out_reward = -100
        self._success_reward = 0
        self._small = small
        self._state = None
        self.n_steps_action = n_steps_action

        # MDP properties
        observation_space = spaces.Box(low=low, high=high)
        action_space = spaces.Box(low=-self.omega_max, high=self.omega_max)
        horizon = 5000
        gamma = .99
        mdp_info = MDPInfo(observation_space, action_space, gamma, horizon)

        # Visualization
        self._viewer = Viewer(self.field_size, self.field_size,
                              background=(66, 131, 237))

        super().__init__(mdp_info)

    def reset(self, state=None):
        if state is None:
            if self._small:
                self._state = np.zeros(4)
                self._state[2] = np.pi / 2
            else:
                low = self.info.observation_space.low
                high = self.info.observation_space.high
                self._state = (high - low) * np.random.rand(4) + low
        else:
            self._state = state

        return self._state

    def step(self, action):
        # The action is the desired turning rate, bounded to the maximum
        # allowed value; the dynamics are integrated with Euler steps.
        r = self._bound(action[0], -self.omega_max, self.omega_max)
        new_state = self._state

        for _ in range(self.n_steps_action):
            state = new_state
            new_state = np.empty(4)
            new_state[0] = state[0] + self._v * np.cos(state[2]) * self._dt
            new_state[1] = state[1] + self._v * np.sin(state[2]) * self._dt
            new_state[2] = normalize_angle(state[2] + state[3] * self._dt)
            new_state[3] = state[3] + (r - state[3]) * self._dt / self._T

            if new_state[0] > self.field_size \
               or new_state[1] > self.field_size \
               or new_state[0] < 0 or new_state[1] < 0:
                new_state[0] = self._bound(new_state[0], 0, self.field_size)
                new_state[1] = self._bound(new_state[1], 0, self.field_size)

                reward = self._out_reward
                absorbing = True
                break
            elif self._through_gate(state[:2], new_state[:2]):
                reward = self._success_reward
                absorbing = True
                break
            else:
                reward = -1
                absorbing = False

        self._state = new_state

        return self._state, reward, absorbing, {}

    def render(self, mode='human'):
        self._viewer.line(self._gate_s, self._gate_e, width=3)

        boat = [
            [-4, -4],
            [-4, 4],
            [4, 4],
            [8, 0.0],
            [4, -4]
        ]
        self._viewer.polygon(self._state[:2], self._state[2], boat,
                             color=(32, 193, 54))

        self._viewer.display(self._dt)

    def stop(self):
        self._viewer.close()

    def _through_gate(self, start, end):
        # Segment-segment intersection test between the gate and the last
        # ship displacement, based on the 2D cross product.
        r = self._gate_e - self._gate_s
        s = end - start
        den = self._cross_2d(vecr=r, vecs=s)

        if den == 0:
            return False

        t = self._cross_2d((start - self._gate_s), s) / den
        u = self._cross_2d((start - self._gate_s), r) / den

        return 1 >= u >= 0 and 1 >= t >= 0

    @staticmethod
    def _cross_2d(vecr, vecs):
        return vecr[0] * vecs[1] - vecr[1] * vecs[0]
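

# Usage sketch (not part of the original source): a short random-policy
# rollout in ShipSteering, assuming the imports above are available. The
# function name and step count are illustrative.
def _demo_ship_steering(n_steps=200):
    env = ShipSteering(small=True)
    state = env.reset()
    for _ in range(n_steps):
        # Sample a turning rate uniformly within the allowed range.
        action = np.random.uniform(env.info.action_space.low,
                                   env.info.action_space.high)
        state, reward, absorbing, _ = env.step(action)
        if absorbing:
            break
    env.stop()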


class PuddleWorld(Environment):
    """
    Puddle world as presented in:
    "Off-Policy Actor-Critic". Degris T. et al.. 2012.

    """
    def __init__(self, start=None, goal=None, goal_threshold=.1,
                 noise_step=.025, noise_reward=0, reward_goal=0., thrust=.05,
                 puddle_center=None, puddle_width=None, gamma=.99,
                 horizon=5000):
        """
        Constructor.

        Args:
            start (np.array, None): starting position of the agent;
            goal (np.array, None): goal position;
            goal_threshold (float, .1): distance threshold of the agent from
                the goal to consider it reached;
            noise_step (float, .025): noise in actions;
            noise_reward (float, 0): standard deviation of gaussian noise in
                reward;
            reward_goal (float, 0): reward obtained reaching goal state;
            thrust (float, .05): distance walked during each action;
            puddle_center (np.array, None): center of the puddle;
            puddle_width (np.array, None): width of the puddle;
            gamma (float, .99): discount factor;
            horizon (int, 5000): horizon of the problem.

        """
        # MDP parameters
        self._start = np.array([.2, .4]) if start is None else start
        self._goal = np.array([1., 1.]) if goal is None else goal
        self._goal_threshold = goal_threshold
        self._noise_step = noise_step
        self._noise_reward = noise_reward
        self._reward_goal = reward_goal
        self._thrust = thrust
        puddle_center = [[.3, .6], [.4, .5], [.8, .9]] \
            if puddle_center is None else puddle_center
        self._puddle_center = [np.array(center) for center in puddle_center]
        puddle_width = [[.1, .03], [.03, .1], [.03, .1]] \
            if puddle_width is None else puddle_width
        self._puddle_width = [np.array(width) for width in puddle_width]

        # The five discrete actions: left, right, down, up and no-op.
        self._actions = [np.zeros(2) for _ in range(5)]
        for i in range(4):
            self._actions[i][i // 2] = thrust * (i % 2 * 2 - 1)

        # MDP properties
        action_space = Discrete(5)
        observation_space = Box(0., 1., shape=(2,))
        mdp_info = MDPInfo(observation_space, action_space, gamma, horizon)

        # Visualization
        self._pixels = None
        self._viewer = Viewer(1.0, 1.0)

        super().__init__(mdp_info)

    def reset(self, state=None):
        if state is None:
            self._state = self._start.copy()
        else:
            self._state = state

        return self._state

    def step(self, action):
        idx = action[0]
        self._state += self._actions[idx] + np.random.uniform(
            low=-self._noise_step, high=self._noise_step, size=(2,))
        self._state = np.clip(self._state, 0., 1.)

        absorbing = np.linalg.norm((self._state - self._goal),
                                   ord=1) < self._goal_threshold

        if not absorbing:
            reward = np.random.randn() * self._noise_reward + \
                self._get_reward(self._state)
        else:
            reward = self._reward_goal

        return self._state, reward, absorbing, {}

    def render(self):
        if self._pixels is None:
            # Precompute the reward landscape once and cache it as a
            # background image.
            img_size = 100
            pixels = np.zeros((img_size, img_size, 3))
            for i in range(img_size):
                for j in range(img_size):
                    x = i / img_size
                    y = j / img_size
                    pixels[i, img_size - 1 - j] = self._get_reward(
                        np.array([x, y]))

            pixels -= pixels.min()
            pixels *= 255. / pixels.max()
            self._pixels = np.floor(255 - pixels)

        self._viewer.background_image(self._pixels)
        self._viewer.circle(self._state, 0.01, color=(0, 255, 0))

        goal_area = [
            [-self._goal_threshold, 0],
            [0, self._goal_threshold],
            [self._goal_threshold, 0],
            [0, -self._goal_threshold]
        ]
        self._viewer.polygon(self._goal, 0, goal_area,
                             color=(255, 0, 0), width=1)

        self._viewer.display(0.1)

    def stop(self):
        if self._viewer is not None:
            self._viewer.close()

    def _get_reward(self, state):
        # Each puddle adds a Gaussian-shaped penalty on top of the -1
        # per-step cost.
        reward = -1.
        for cen, wid in zip(self._puddle_center, self._puddle_width):
            reward -= 2. * norm.pdf(state[0], cen[0], wid[0]) * norm.pdf(
                state[1], cen[1], wid[1])

        return reward
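

# Usage sketch (not part of the original source): step PuddleWorld with
# random discrete actions and draw the cached reward landscape. Illustrative
# only; rendering requires a display.
def _demo_puddle_world(n_steps=200):
    env = PuddleWorld()
    state = env.reset()
    for _ in range(n_steps):
        # Actions are indices in {0, ..., 4}: left, right, down, up, no-op.
        action = np.array([np.random.randint(5)])
        state, reward, absorbing, _ = env.step(action)
        env.render()
        if absorbing:
            break
    env.stop()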


class CartPole(Environment):
    """
    The Inverted Pendulum on a Cart environment as presented in:
    "Least-Squares Policy Iteration". Lagoudakis M. G. and Parr R.. 2003.

    """
    def __init__(self, m=2., M=8., l=.5, g=9.8, mu=1e-2, max_u=50.,
                 noise_u=10., horizon=3000, gamma=.95):
        """
        Constructor.

        Args:
            m (float, 2.0): mass of the pendulum;
            M (float, 8.0): mass of the cart;
            l (float, .5): length of the pendulum;
            g (float, 9.8): gravity acceleration constant;
            mu (float, 1e-2): friction constant of the pendulum;
            max_u (float, 50.): maximum allowed input torque;
            noise_u (float, 10.): maximum noise on the action;
            horizon (int, 3000): horizon of the problem;
            gamma (float, .95): discount factor.

        """
        # MDP parameters
        self._m = m
        self._M = M
        self._l = l
        self._g = g
        self._alpha = 1 / (self._m + self._M)
        self._mu = mu
        self._dt = .1
        self._max_u = max_u
        self._noise_u = noise_u
        high = np.array([np.inf, np.inf])

        # MDP properties
        observation_space = spaces.Box(low=-high, high=high)
        action_space = spaces.Discrete(3)
        mdp_info = MDPInfo(observation_space, action_space, gamma, horizon)

        # Visualization
        self._viewer = Viewer(2.5 * l, 2.5 * l)
        self._last_u = None
        self._state = None

        super().__init__(mdp_info)

    def reset(self, state=None):
        if state is None:
            angle = np.random.uniform(-np.pi / 8., np.pi / 8.)

            self._state = np.array([angle, 0.])
        else:
            self._state = state
            self._state[0] = normalize_angle(self._state[0])

        self._last_u = 0

        return self._state

    def step(self, action):
        # The three discrete actions push the cart left, apply no force, or
        # push it right; uniform noise is added to the applied force.
        if action == 0:
            u = -self._max_u
        elif action == 1:
            u = 0.
        else:
            u = self._max_u

        self._last_u = u

        u += np.random.uniform(-self._noise_u, self._noise_u)
        new_state = odeint(self._dynamics, self._state, [0, self._dt], (u,))

        self._state = np.array(new_state[-1])
        self._state[0] = normalize_angle(self._state[0])

        if np.abs(self._state[0]) > np.pi * .5:
            reward = -1.
            absorbing = True
        else:
            reward = 0.
            absorbing = False

        return self._state, reward, absorbing, {}

    def render(self, mode='human'):
        start = 1.25 * self._l * np.ones(2)
        end = 1.25 * self._l * np.ones(2)

        end[0] += self._l * np.sin(self._state[0])
        end[1] += self._l * np.cos(self._state[0])

        self._viewer.line(start, end)
        self._viewer.square(start, 0, self._l / 10)
        self._viewer.circle(end, self._l / 20)

        direction = -np.sign(self._last_u) * np.array([1, 0])
        value = np.abs(self._last_u)
        self._viewer.force_arrow(start, direction, value, self._max_u,
                                 self._l / 5)

        self._viewer.display(self._dt)

    def stop(self):
        self._viewer.close()

    def _dynamics(self, state, t, u):
        theta = state[0]
        omega = state[1]

        d_theta = omega
        d_omega = (self._g * np.sin(theta) - self._alpha * self._m *
                   self._l * .5 * d_theta ** 2 * np.sin(2 * theta) * .5 -
                   self._alpha * np.cos(theta) * u) / (
                       2 / 3 * self._l - self._alpha * self._m * self._l *
                       .5 * np.cos(theta) ** 2)

        return d_theta, d_omega
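

# Usage sketch (not part of the original source): a naive bang-bang policy
# that pushes the cart in the direction that counteracts the pole's tilt.
# Illustrative only; the episode ends when the pole falls past the horizontal.
def _demo_cart_pole(n_steps=300):
    env = CartPole()
    state = env.reset()
    for _ in range(n_steps):
        # Action 0 pushes left, 1 applies no force, 2 pushes right.
        action = np.array([2 if state[0] > 0 else 0])
        state, reward, absorbing, _ = env.step(action)
        if absorbing:
            break
    env.stop()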


class InvertedPendulum(Environment):
    """
    The Inverted Pendulum environment (continuous version) as presented in:
    "Reinforcement Learning In Continuous Time and Space". Doya K.. 2000.
    "Off-Policy Actor-Critic". Degris T. et al.. 2012.
    "Deterministic Policy Gradient Algorithms". Silver D. et al.. 2014.

    """
    def __init__(self, random_start=False, m=1., l=1., g=9.8, mu=1e-2,
                 max_u=5., horizon=5000, gamma=.99):
        """
        Constructor.

        Args:
            random_start (bool, False): whether to start from a random
                position or from the horizontal one;
            m (float, 1.0): mass of the pendulum;
            l (float, 1.0): length of the pendulum;
            g (float, 9.8): gravity acceleration constant;
            mu (float, 1e-2): friction constant of the pendulum;
            max_u (float, 5.0): maximum allowed input torque;
            horizon (int, 5000): horizon of the problem;
            gamma (float, .99): discount factor.

        """
        # MDP parameters
        self._m = m
        self._l = l
        self._g = g
        self._mu = mu
        self._random = random_start
        self._dt = .01
        self._max_u = max_u
        self._max_omega = 5 / 2 * np.pi
        high = np.array([np.pi, self._max_omega])

        # MDP properties
        observation_space = spaces.Box(low=-high, high=high)
        action_space = spaces.Box(low=np.array([-max_u]),
                                  high=np.array([max_u]))
        mdp_info = MDPInfo(observation_space, action_space, gamma, horizon)

        # Visualization
        self._viewer = Viewer(2.5 * l, 2.5 * l)
        self._last_u = None

        super().__init__(mdp_info)

    def reset(self, state=None):
        if state is None:
            if self._random:
                angle = np.random.uniform(-np.pi, np.pi)
            else:
                angle = np.pi / 2

            self._state = np.array([angle, 0.])
        else:
            self._state = state
            self._state[0] = normalize_angle(self._state[0])
            self._state[1] = self._bound(self._state[1], -self._max_omega,
                                         self._max_omega)

        self._last_u = 0.0

        return self._state

    def step(self, action):
        u = self._bound(action[0], -self._max_u, self._max_u)
        new_state = odeint(self._dynamics, self._state, [0, self._dt], (u,))

        self._state = np.array(new_state[-1])
        self._state[0] = normalize_angle(self._state[0])
        self._state[1] = self._bound(self._state[1], -self._max_omega,
                                     self._max_omega)

        # The reward is the cosine of the pendulum angle, maximal when the
        # pendulum is upright; the episode never terminates.
        reward = np.cos(self._state[0])

        self._last_u = u.item()

        return self._state, reward, False, {}

    def render(self, mode='human'):
        start = 1.25 * self._l * np.ones(2)
        end = 1.25 * self._l * np.ones(2)

        end[0] += self._l * np.sin(self._state[0])
        end[1] += self._l * np.cos(self._state[0])

        self._viewer.line(start, end)
        self._viewer.circle(start, self._l / 40)
        self._viewer.circle(end, self._l / 20)
        self._viewer.torque_arrow(start, -self._last_u, self._max_u,
                                  self._l / 5)

        self._viewer.display(self._dt)

    def stop(self):
        self._viewer.close()

    def _dynamics(self, state, t, u):
        theta = state[0]
        omega = self._bound(state[1], -self._max_omega, self._max_omega)

        d_theta = omega
        d_omega = (-self._mu * omega + self._m * self._g * self._l *
                   np.sin(theta) + u) / (self._m * self._l ** 2)

        return d_theta, d_omega
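

# Usage sketch (not part of the original source): a simple energy-pumping
# heuristic that applies the maximum torque along the current angular
# velocity, swinging the pendulum towards the upright position over several
# swings. Illustrative only; not an optimal controller.
def _demo_inverted_pendulum(n_steps=1000):
    env = InvertedPendulum(random_start=False)
    state = env.reset()
    for _ in range(n_steps):
        # Saturated torque in the direction of motion (small offset avoids a
        # zero torque when starting at rest).
        action = env.info.action_space.high * np.sign(state[1] + 1e-8)
        state, reward, _, _ = env.step(action)
    env.stop()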