def eval_policy(self, state, type_of_agent='background_cars'):
    """Return a SteeringAction that tracks this agent's planned trajectory.

    Discards waypoints the vehicle is already on top of, then runs the
    PID controllers on the heading and velocity errors toward the next
    waypoint.

    Parameters
    ----------
    state : PositionState
        Current state of the world; this agent's object is looked up in
        ``state.dynamic_objects[type_of_agent]`` by ``self.agent_num``.
    type_of_agent : str
        Key selecting which group of dynamic objects this agent controls.

    Returns
    -------
    SteeringAction
        Zero action if the trajectory is empty, otherwise the PID output.
    """
    obj = state.dynamic_objects[type_of_agent][str(self.agent_num)]
    if obj.trajectory.is_empty():
        # Nothing left to follow: emit a null action.
        return SteeringAction(steering=0.0, acceleration=0.0)

    p = obj.trajectory.first()
    target_loc = p[:2].tolist()
    target_vel = p[2]
    # Pop waypoints that fall inside the vehicle's footprint so the
    # target is always ahead of the car.
    while obj.contains_point((p[0], p[1])) and not obj.trajectory.is_empty():
        p = obj.trajectory.pop()
        target_loc = p[:2].tolist()
        target_vel = p[2]

    # Desired heading toward the target. The y terms are flipped
    # (obj.y - target_y), consistent with an inverted screen y-axis.
    ac2 = np.arctan2(obj.y - target_loc[1], target_loc[0] - obj.x)
    # Normalize the current angle into (-pi, pi] before differencing.
    ang = obj.angle if obj.angle < np.pi else obj.angle - 2 * np.pi
    e_angle = ac2 - ang
    # Wrap the heading error into [-pi, pi] so the controller takes
    # the short way around.
    if e_angle > np.pi:
        e_angle -= np.pi * 2
    elif e_angle < -np.pi:
        e_angle += np.pi * 2

    e_vel = target_vel - obj.vel
    # NOTE: the original also computed a distance-minus-velocity error
    # (e_vel_) that was never used; it has been removed as dead code.

    action_acc = self.PID_acc.get_control(e_vel)
    action_steer = self.PID_steer.get_control(e_angle)
    return SteeringAction(steering=action_steer, acceleration=action_acc)
def eval_policy(self, state):
    """Return a SteeringAction driven by keyboard arrow keys.

    Parameters
    ----------
    state : PositionState
        State of the world, unused.

    Returns
    -------
    SteeringAction
        Steering in {-1, 0, 1} (left/right arrows) and acceleration in
        {-1, 0, 1} (up/down arrows).
    """
    steer, acc = 0, 0
    try:
        pygame.event.pump()
    except pygame.error:
        # No pygame display to poll. Fall back to a null SteeringAction
        # instead of a bare (0, 0) tuple so callers always receive the
        # same type from every code path (this was a latent type bug).
        print("Error: Needs a visualizer")
        return SteeringAction(0, 0)
    keys = pygame.key.get_pressed()
    if keys[pygame.K_UP]:
        acc = 1
    elif keys[pygame.K_DOWN]:
        acc = -1
    if keys[pygame.K_LEFT]:
        steer = 1
    elif keys[pygame.K_RIGHT]:
        steer = -1
    return SteeringAction(steer, acc)
def eval_policy(self, action, state, simplified=False):
    """Validate and pass through the supplied SteeringAction.

    Parameters
    ----------
    action : SteeringAction
        The action to validate and return unchanged.
    state : PositionState
        State of the world, unused.
    simplified : bool
        When True, skip validation and return a zero action.

    Returns
    -------
    SteeringAction

    Raises
    ------
    Exception
        If ``action`` is not a SteeringAction (and ``simplified`` is False).
    """
    if simplified:
        return SteeringAction(0.0, 0.0)
    if isinstance(action, SteeringAction):
        return action
    raise Exception('Actions is Not of Type Steering Action')
def eval_policy(self, state, simplified=None):
    """Return the null action regardless of the observed state.

    Parameters
    ----------
    state : PositionState
        State of the world, unused.
    simplified : optional
        Unused; accepted for interface compatibility with other agents.

    Returns
    -------
    SteeringAction
        Always ``SteeringAction(0, 0)``.
    """
    return SteeringAction(0, 0)
def get_action(self, robot_action=None, supervisor_action=None):
    """Return the supervisor's action perturbed by Gaussian noise.

    Parameters
    ----------
    robot_action : SteeringAction, optional
        Unused; accepted for interface compatibility.
    supervisor_action : SteeringAction, optional
        Action whose value is noised. Assumes ``get_value()`` yields a
        (steering, acceleration) pair — TODO confirm against SteeringAction.

    Returns
    -------
    SteeringAction
        The supervisor action with N(0, self.noise) noise applied
        element-wise to steering and acceleration.
    """
    steering, acceleration = normal(supervisor_action.get_value(), self.noise)
    return SteeringAction(steering=steering, acceleration=acceleration)
# Drive a single controlled car through a fixed open-loop maneuver
# (straight, left turn, straight) and check it ends near the expected spot.
with open('configs/default_config.json') as json_data_file:
    data = json.load(json_data_file)
data['agents']['background_cars'] = 0
data['environment']['visualize'] = False
env = uds.UrbanDrivingEnv(data)
state = env.current_state

# Place the car at a known pose before stepping.
car = state.dynamic_objects['controlled_cars']['0']
car.x = 100
car.y = 550
car.angle = 0
car.vel = 0

# (number of steps, steering) segments; acceleration is 1.0 throughout.
for num_steps, steering in ((80, 0.0), (23, 1.0), (70, 0.0)):
    for _ in range(num_steps):
        observations, reward, done, info_dict = env.step(
            [SteeringAction(steering, 1.0)])
        assert (not done)

assert (527 < state.dynamic_objects['controlled_cars']['0'].x < 528)
assert (109 < state.dynamic_objects['controlled_cars']['0'].y < 110)
import json

import gym
import gym_urbandriving as uds
from gym_urbandriving import *
from gym_urbandriving.agents import *
from gym_urbandriving.assets import *
from gym_urbandriving.utils import Trajectory
from gym_urbandriving.actions import SteeringAction
import numpy as np
import IPython

# Two controlled cars observing bitmap ('bmp') state: step once with null
# actions, display the first car's observation, and sanity-check the state.
with open('configs/default_config.json') as json_data_file:
    data = json.load(json_data_file)
data['agents']['controlled_cars'] = 2
data['environment']['visualize'] = True
data['agents']['state_space'] = 'bmp'

null_actions = [SteeringAction(0, 0)] * 2
env = uds.UrbanDrivingEnv(data)
observations, reward, done, info_dict = env.step(null_actions)
assert (len(observations) == 2)

# Imported here (as in the original) so matplotlib is only pulled in
# once an observation exists to display.
import matplotlib.pyplot as plt
plt.imshow(observations[0])
plt.show()

state = env.current_state
assert (state.dynamic_objects['controlled_cars']['0'].vel == 0.0)
import json

import gym
import gym_urbandriving as uds
from gym_urbandriving import *
from gym_urbandriving.agents import *
from gym_urbandriving.assets import *
from gym_urbandriving.utils import Trajectory
import numpy as np
from gym_urbandriving.actions import SteeringAction

# Check that enabling the goal_position flag appends exactly three extra
# values to each controlled car's observation vector.
with open('configs/default_config.json') as json_data_file:
    data = json.load(json_data_file)
data['agents']['controlled_cars'] = 2
data['environment']['visualize'] = False
null_actions = [SteeringAction(0.0, 0.0)] * 2

# Baseline: observations without goal information.
env = uds.UrbanDrivingEnv(data)
observations, reward, done, info_dict = env.step(null_actions)
assert (len(observations) == 2)
assert (len(observations[0]) == len(observations[1]))
orig_len = len(observations[0])
state = env.current_state
assert (state.dynamic_objects['controlled_cars']['0'].vel == 0.0)

# With goal_position enabled, each observation grows by three entries.
data['agents']['state_space_config']['goal_position'] = True
env = uds.UrbanDrivingEnv(data)
observations, reward, done, info_dict = env.step(null_actions)
assert (len(observations) == 2)
assert (len(observations[0]) == len(observations[1]) == orig_len + 3)