Example #1
    def gizmo(self):
        """
            Draw debug lines for the lane geometry: a reference line (magenta),
            the car's orthogonal projection onto the inner circle (red) and
            the lane tangent at that point (yellow)
        """
        pygame.draw.line(
            self.screen, Color.Magenta,
            world_to_pixels(CENTER_POINT, SCREEN_HEIGHT, PPM),
            world_to_pixels(S_POINT + vec2(ROAD_WIDTH, 0), SCREEN_HEIGHT, PPM))

        theta = Util.angle_direct(
            Util.normalize(S_POINT - CENTER_POINT),
            Util.normalize(self.car.body.position - CENTER_POINT))
        theta = Util.deg_to_rad(theta)
        h = vec2(RADIUS_INNER * np.cos(theta) + CENTER_POINT.x,
                 RADIUS_INNER * np.sin(theta) +
                 CENTER_POINT.y)  # orthogonal projection
        pygame.draw.line(self.screen, Color.Red,
                         world_to_pixels(CENTER_POINT, SCREEN_HEIGHT, PPM),
                         world_to_pixels(h, SCREEN_HEIGHT, PPM))

        tangent = Util.rotate(Util.normalize(CENTER_POINT - h),
                              -90.0)  # tangent to the circle
        pygame.draw.line(self.screen, Color.Yellow,
                         world_to_pixels(h, SCREEN_HEIGHT, PPM),
                         world_to_pixels(h + tangent, SCREEN_HEIGHT, PPM))
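Example #1 relies on project utilities that are not shown: world_to_pixels and the res.Util vector helpers (normalize, rotate, angle_direct, deg_to_rad; angle_direct evidently returns a signed angle in degrees, since its result is passed through deg_to_rad). A minimal sketch of the assumed screen-space conversion, from Box2D's y-up world in meters to pygame's y-down screen in pixels; the body is illustrative, not the project's actual implementation:

def world_to_pixels(v, screen_height, ppm):
    # Box2D world coordinates are y-up, in meters; pygame screen coordinates
    # are y-down, in pixels. ppm is the assumed pixels-per-meter scale factor.
    return int(v.x * ppm), int(screen_height - v.y * ppm)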
Example #2
    # Create Testbed
    testbed = TestbedRace(sim_param=param)

    # Walk every folder under the directory recursively
    for root, _dirs, _files in os.walk(dir_name):

        curr_dir = root + '/'

        fo = None  # file object used for recording
        writer = None  # CSV writer used to record events
        filename = ''

        if glob.glob(curr_dir + "*.h5"):
            # Create a CSV file to record the evaluation results
            filename = curr_dir + Util.get_time_string() + "_" + str(
                param.max_ep) + "ep_" + testbed.env_name + "_eval.csv"
            print('csv file: {}'.format(filename))
            fo = open(filename, 'a')
            writer = csv.writer(fo)

        # For each saved neural network model
        for f in sorted(glob.glob(curr_dir + "*.h5")):

            print('')  # newline print
            debug_race.xprint(color=PRINT_GREEN,
                              msg="Run NN file: {}".format(f))

            # Simulation lifecycle
            testbed.setup_simulations(file_to_load=f)
            testbed.run_simulation()

        # Close the record file once every model in this folder has been evaluated
        if fo:
            fo.close()
Example #3
import time

import res.Util as Util
import Global

# Global fixed flags
record = False  # record flag; if True, record the simulation's events to file
debug = True  # debug flag; if True, print event messages to the console

# Simulation id
sim_id = 0

# Variables that are reset for each simulation
simlogs_fo = None  # file object used to open the record file
header_write = False  # write the record-file header only once per simulation
simlogs_writer = None  # writer object used to record events
sim_event_count = 0
timestr = Util.get_time_string()

def reset_simulation_global():
    global header_write
    global simlogs_fo
    global simlogs_writer
    global sim_event_count
    global timestr

    header_write = False  # write the record-file header only once per simulation
    simlogs_fo = None  # file object used to open the record file
    simlogs_writer = None  # writer object used to record events
    sim_event_count = 0
    timestr = Util.get_time_string()
    Global.sim_start_time = time.time()
Example #4
    def normalize_sensors_value(self, val):
        return Util.min_max_normalization_m1_1(val,
                                               _min=0.0,
                                               _max=self.car.raycast_length)
Example #5
    @staticmethod
    def normalize_distance(val, _min, _max):
        return Util.min_max_normalization_m1_1(val, _min=_min, _max=_max)
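Example #4 and Example #5 both delegate to Util.min_max_normalization_m1_1, which, by its name and usage, linearly maps a value from [_min, _max] onto [-1, 1]. A sketch of the assumed formula (illustrative, not the project's actual implementation):

def min_max_normalization_m1_1(val, _min, _max):
    # Map val from [_min, _max] to [0, 1], then stretch the result to [-1, 1].
    return 2.0 * (val - _min) / (_max - _min) - 1.0

print(min_max_normalization_m1_1(5.0, _min=0.0, _max=10.0))  # -> 0.0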
Example #6
    @property
    def distance_lane(self):
        # Signed distance from the inner lane boundary (circle of RADIUS_INNER)
        d = Util.distance(self.car.body.position, CENTER_POINT) - RADIUS_INNER
        return d
Example #7
    def __init__(self, sim_param=None):
        """
            :param sim_param: simulation parameters from command-line arguments
        """
        self.sim_param = sim_param

        # Load environment and agent config from file.
        # Assume the path is config/<env>/<env>_<agent type>, e.g. 'config/cartpole/cartpole_dqn'
        sys.path.append('config/' + sim_param.cfg.split('_')[0])
        config = importlib.import_module(sim_param.cfg)
        env = config.environment['env_name']
        self.problem_config = config

        print("########################")
        print("#### CL Testbed Gym ####")
        print("########################")
        print("Environment: {}\n".format(env))

        # -------------------- Simulation Parameters ----------------------
        self.render = sim_param.render  # whether to render the visualization
        self.num_agents = sim_param.num_agents  # number of agents in the simulation
        self.training = sim_param.training  # whether to train the agent
        self.random_agent = sim_param.random_agent  # if True, the agent performs random actions instead of choosing them
        self.exploration = sim_param.exploration  # whether the agent has an exploration phase
        self.collect_experiences = sim_param.collect_experiences  # whether the agent collects experiences at each timestep

        # Max number of episodes
        if sim_param.max_ep:
            self.max_ep = sim_param.max_ep
        else:
            self.max_ep = config.environment['max_ep']

        # Average score the agent needs to reach to consider the problem solved
        if sim_param.solved_score:
            self.solved_score = sim_param.solved_score
        else:
            self.solved_score = config.environment['solved_score']

        self.load_model = sim_param.load_model
        self.load_all_weights = sim_param.load_all_weights
        self.load_h1h2_weights = sim_param.load_h1h2_weights
        self.load_h1_weights = sim_param.load_h1_weights
        self.load_h2_weights = sim_param.load_h2_weights
        self.load_out_weights = sim_param.load_out_weights
        self.load_h2out_weights = sim_param.load_h2out_weights
        self.load_h1out_weights = sim_param.load_h1out_weights
        self.load_mem = sim_param.load_mem
        self.load_mem_q_values = sim_param.load_mem_q_values

        self.file_to_load = sim_param.file_to_load

        self.save_model = sim_param.save_model
        self.save_mem = sim_param.save_mem
        self.save_model_freq_ep = sim_param.save_model_freq_ep
        self.save_mem_freq_ep = sim_param.save_mem_freq_ep  # TODO: not implemented; unclear whether it is really useful

        # Collaborative Learning
        self.collaboration = sim_param.collaboration
        self.cl_allweights = sim_param.cl_allweights
        self.cl_exp = sim_param.cl_exp
        self.exchange_freq = sim_param.exchange_freq

        # Directory for saving files
        self.suffix = ''
        self.sim_dir = ''
        self.simlogs_dir = ''
        self.brain_dir = ''
        self.seed_dir = ''
        self.env_name = env  # e.g. "CartPole_v0"

        # Directory for saving events, model files or memory files
        global_gym.record = sim_param.record
        if global_gym.record or sim_param.save_model or \
                sim_param.save_mem or sim_param.save_model_freq_ep or sim_param.save_mem_freq_ep:
            self.suffix = sim_param_gym.sim_suffix()
            self.sim_dir = sim_param_gym.sim_dir()
            Util.create_dir(self.sim_dir)  # create the directory
            print("Record directory: {}".format(sim_param_gym.sim_dir()))

        # Print events only in debug mode
        global_gym.debug = sim_param.debug

        self.running = True  # simulation running flag
        self.sim_count = 0  # number of simulations run so far

        # Create the agents
        self.agents = []
        for i in range(sim_param.num_agents):
            self.agents.append(
                AgentGym(render=sim_param.render,
                         id=i,
                         num_agents=self.num_agents,
                         config=config,
                         max_ep=self.max_ep,
                         solved_score=self.solved_score,
                         env_name=env,
                         collaboration=self.collaboration))
        for agent in self.agents:
            agent.agents = self.agents  # list of agents

        self.given_seeds = sim_param.seed  # seeds provided directly from the command line
        print('seeds list', self.given_seeds)
        self.save_seed = sim_param.save_seed  # save the seeds in a txt file

        self.max_sim = sim_param.multi_sim  # max number of simulations

        self.save_record_rpu = sim_param.save_record_rpu  # record simulation logs when using Run in Parallel Universe (RPU) script
        self.seed_list = None  # used in RPU
        self.check_agents_nn_saved = None  # per-episode flags: has each agent saved its neural network?
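For context, a hypothetical driver for this constructor. The class name TestbedGym and the parse_arguments helper are assumptions (the banner prints "CL Testbed Gym", but the class name is not shown); setup_simulations and run_simulation appear in Example #9 and Example #2:

param = parse_arguments()              # assumed CLI parser producing sim_param
testbed = TestbedGym(sim_param=param)  # hypothetical class name

for i in range(testbed.max_sim):       # max_sim comes from sim_param.multi_sim
    testbed.setup_simulations(sim_id=i)
    testbed.run_simulation()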
Example #8
    def step(self, action):
        observation = []

        self.car.update_friction()
        if self.manual:
            self.car.update_drive_manual()
            self.car.update_turn_manual()
        else:  # select action using AI
            self.car.update_drive(Action(action))
            self.car.update_turn(Action(action))

        self.world_step()
        self.timesteps += 1

        # Distance to goal (for debugging)
        self.d2g = self.distance_goal

        # Orientation in lane
        self.orientation = self.orientation_lane
        observation.append(self.orientation_lane)

        # Distance in lane
        distance_lane_norm = self.normalize_distance(self.distance_lane,
                                                     _min=self.car.radius,
                                                     _max=ROAD_WIDTH -
                                                     self.car.radius)
        observation.append(distance_lane_norm)

        # Speed
        current_forward_normal = self.car.body.GetWorldVector((0, 1))
        self.car.speed = self.car.forward_velocity.dot(current_forward_normal)
        speed_norm = Util.min_max_normalization_m1_1(
            self.car.speed, _min=0., _max=self.car.max_forward_speed)
        observation.append(speed_norm)

        # Angular velocity of the car
        angular_vel = self.car.body.angularVelocity
        max_angular_vel = 3.5
        angular_vel_norm = Util.min_max_normalization_m1_1(
            angular_vel, _min=-max_angular_vel, _max=max_angular_vel)
        observation.append(angular_vel_norm)

        # Sensor readings
        self.car.read_sensors()
        for sensor in self.car.sensors:
            normed_sensor = self.normalize_sensors_value(
                sensor)  # normalize each sensor's value
            observation.append(normed_sensor)

        # Terminal condition
        if self.inside_goal:
            self.goal_reached = True
            done = True
        elif self.timesteps >= self.max_episode_steps or self.car.collision:
            done = True
        else:
            done = False

        # Reward from the environment
        reward = self.reward_function()

        state = np.array(observation, dtype=np.float32)
        return state, reward, done, {}  # empty info dict, per the Gym step API
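The observation assembled above is, in order: lane orientation, lane distance, speed, angular velocity, then one reading per sensor, each normalized to [-1, 1]. A minimal episode loop over this step method; env, agent and env.reset() are assumptions, while the four-tuple return matches the code above:

state = env.reset()                      # assumed reset method
done = False
total_reward = 0.0
while not done:
    action = agent.choose_action(state)  # assumed agent API
    state, reward, done, _ = env.step(action)
    total_reward += reward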
Example #9
    def setup_simulations(self, sim_id=0, file_to_load=''):
        """
            Set up the current simulation
            :param sim_id: id of the current simulation
            :param file_to_load: NN file or experiences file to load directly
        """
        global_gym.record = self.sim_param.record  # record simulation log

        # Set ID of simulation
        global_gym.sim_id = sim_id

        debug_gym.xprint(color=PRINT_GREEN, msg="Begin simulation")
        debug_gym.xprint(msg="Setup")

        if file_to_load:
            self.file_to_load = file_to_load

        # Variables
        self.sim_count += 1
        self.running = True
        self.check_agents_nn_saved = [False] * self.num_agents

        # Record simulation logs
        self.simlogs_dir = self.sim_dir + 'sim_logs/'
        if global_gym.record:
            debug_gym.xprint(msg="Start recording".format(sim_id))

            # CSV event file
            suffix = self.env_name + '_sim' + str(
                global_gym.sim_id) + '_' + self.suffix
            filename = debug_gym.create_record_file(dir=self.simlogs_dir,
                                                    suffix=suffix)
            global_gym.simlogs_fo = open(filename, 'a')
            global_gym.simlogs_writer = csv.writer(global_gym.simlogs_fo)
            global_gym.simlogs_writer.writerow(
                debug_gym.header)  # write header of the record file

        # RPU: save simulation logs
        if self.save_record_rpu:
            global_gym.record = True

            # CSV event file
            direc = os.path.dirname(file_to_load) + '/rpu_sim_logs/'

            nn_file = os.path.basename(file_to_load)  # filename
            episode = re.sub(r'.*_(?P<episode>\d+)ep_.*', r'\g<episode>',
                             nn_file)  # extract the episode number
            suffix = self.env_name + '_' + episode + 'ep' + '_rpu'

            filename = debug_gym.create_record_file(dir=direc, suffix=suffix)
            global_gym.simlogs_fo = open(filename, 'a')
            global_gym.simlogs_writer = csv.writer(global_gym.simlogs_fo)
            global_gym.simlogs_writer.writerow(
                debug_gym.header)  # write header of the record file

        # Create brain directory (NN files and experiences files)
        if self.save_model or self.save_mem or self.save_model_freq_ep or self.save_mem_freq_ep:
            self.brain_dir = self.sim_dir + 'brain_files/' + 'sim' + str(
                global_gym.sim_id) + '/'
            Util.create_dir(self.brain_dir)

        # Seed directory (seeds for the random number generator)
        if self.save_seed and global_gym.record:
            self.seed_dir = self.sim_dir + 'seeds/'
            Util.create_dir(self.seed_dir)  # create the directory

        # Setup agents
        self.setup_agents()

        debug_gym.xprint(msg="Setup complete. Start simulation")
Example #10
    def __init__(self, sim_param=None):
        """
            :param sim_param: simulation parameters from command-line arguments
        """
        self.sim_param = sim_param

        # Load config from file.
        # Assume the path is config/<env>/<env>_<agent type>, e.g. 'config/racecircleleft/racecircleleft_dqn'
        sys.path.append('config/' + sim_param.cfg.split('_')[0])
        config = importlib.import_module(sim_param.cfg)
        env = config.environment['env_name']
        self.problem_config = config

        # 2nd agent (optional, depends on the experiment)
        env2 = None
        self.problem_config2 = None
        if sim_param.cfg2:
            sys.path.append('config/' + sim_param.cfg2.split('_')[0])
            config2 = importlib.import_module(sim_param.cfg2)
            env2 = config2.environment['env_name']
            self.problem_config2 = config2

        # 3rd agent (optional, depends on the experiment)
        env3 = None
        self.problem_config3 = None
        if sim_param.cfg3:
            sys.path.append('config/' + sim_param.cfg3.split('_')[0])
            config3 = importlib.import_module(sim_param.cfg3)
            env3 = config3.environment['env_name']
            self.problem_config3 = config3

        print("###########################")
        print("#### CL Testbed Race ######")
        print("###########################")
        # -------------------- Simulation Parameters ----------------------

        self.display = sim_param.display  # whether to render the visualization
        self.can_handle_events = sim_param.display  # user can control the simulation (pause, stop) via the keyboard
        self.num_agents = sim_param.num_agents  # number of agents in the simulation

        self.training = sim_param.training  # whether to train the agent
        self.random_agent = sim_param.random_agent  # if True, the agent performs random actions instead of choosing them
        self.exploration = sim_param.exploration  # whether the agent has an exploration phase
        self.collect_experiences = sim_param.collect_experiences  # whether the agent collects experiences at each timestep

        # Max number of episodes
        if sim_param.max_ep:
            self.max_ep = sim_param.max_ep
        else:
            self.max_ep = config.environment['max_ep']

        # Average number of timesteps the agent needs to reach to consider the problem solved
        if sim_param.solved_timesteps:
            self.solved_timesteps = sim_param.solved_timesteps
        else:
            self.solved_timesteps = config.environment['solved_timesteps']

        self.load_model = sim_param.load_model
        self.load_all_weights = sim_param.load_all_weights
        self.load_h1h2_weights = sim_param.load_h1h2_weights
        self.load_h1_weights = sim_param.load_h1_weights
        self.load_h2_weights = sim_param.load_h2_weights
        self.load_out_weights = sim_param.load_out_weights
        self.load_h2out_weights = sim_param.load_h2out_weights
        self.load_h1out_weights = sim_param.load_h1out_weights
        self.load_mem = sim_param.load_mem
        self.load_mem2 = sim_param.load_mem2

        self.file_to_load = sim_param.file_to_load
        self.file_to_load2 = sim_param.file_to_load2

        self.save_model = sim_param.save_model
        self.save_mem = sim_param.save_mem
        self.save_model_freq_ep = sim_param.save_model_freq_ep
        self.save_mem_freq_ep = sim_param.save_mem_freq_ep  # TODO: not implemented; unclear whether it is really useful

        # Combining experiences
        self.give_exp = sim_param.give_exp
        self.tts = sim_param.tts

        # Directory for saving files
        self.suffix = ''
        self.sim_dir = ''
        self.simlogs_dir = ''
        self.brain_dir = ''
        self.seed_dir = ''
        self.env_name = env  # e.g. "RaceCircleLeft"
        self.env_name2 = None
        self.env_name3 = None
        if sim_param.cfg2:
            self.env_name2 = env2
        if sim_param.cfg3:
            self.env_name3 = env3

        # Directory for saving events, model files or memory files
        global_race.record = sim_param.record
        if global_race.record or sim_param.save_model or \
                sim_param.save_mem or sim_param.save_model_freq_ep or sim_param.save_mem_freq_ep:
            self.suffix = sim_param_race.sim_suffix()
            self.sim_dir = sim_param_race.sim_dir()
            Util.create_dir(self.sim_dir)  # create the directory
            print("\nRecord directory: {}".format(sim_param_race.sim_dir()))

        # Print events only in debug mode
        global_race.debug = sim_param.debug

        # Simulation running flag
        self.running = True
        self.sim_count = 0  # number of simulations run so far
        self.pause = False

        # Create the agents
        self.agents = []

        if sim_param.cfg2:
            self.num_agents += 1
        if sim_param.cfg3:
            self.num_agents += 1

        self.agents.append(
            AgentRace(display=sim_param.display,
                      id=0,
                      num_agents=self.num_agents,
                      config=config,
                      max_ep=self.max_ep,
                      solved_timesteps=self.solved_timesteps,
                      env_name=env,
                      manual=sim_param.manual))
        if sim_param.cfg2:
            agent2 = AgentRace(display=sim_param.display,
                               id=1,
                               num_agents=self.num_agents,
                               config=self.problem_config2,
                               max_ep=self.max_ep,
                               env_name=env2,
                               solved_timesteps=self.solved_timesteps,
                               manual=sim_param.manual,
                               give_exp=self.give_exp,
                               tts=self.tts)
            self.agents.append(agent2)
        if sim_param.cfg3:
            agent3 = AgentRace(display=sim_param.display,
                               id=2,
                               num_agents=self.num_agents,
                               config=self.problem_config3,
                               max_ep=self.max_ep,
                               env_name=env3,
                               solved_timesteps=self.solved_timesteps,
                               manual=sim_param.manual,
                               give_exp=self.give_exp,
                               tts=self.tts)
            self.agents.append(agent3)

        # Give each agent a reference to the full list of agents
        for agent in self.agents:
            agent.agents = self.agents

        self.given_seeds = sim_param.seed
        print('seeds', self.given_seeds)
        self.max_sim = sim_param.multi_sim  # max number of simulations
        self.save_seed = sim_param.save_seed  # save the seeds in a txt file

        self.save_record_rpu = sim_param.save_record_rpu  # record simulation logs when using Run in Parallel Universe (RPU) script
        self.seed_list = None  # used in RPU
        self.check_agents_nn_saved = None  # per-episode flags: has each agent saved its neural network?
Example #11
    @property
    def distance_lane(self):
        # Signed distance from the outer lane boundary (circle of RADIUS_OUTTER)
        d = RADIUS_OUTTER - Util.distance(self.car.body.position, CENTER_POINT)
        return d
Example #12
    @property
    def orientation_lane(self):
        """
            Get the agent's orientation in the lane
        """
        pos = self.car.body.position
        if self.distance_goal < np.pi * RADIUS_INNER:

            # 2nd part : Turn right
            theta = Util.angle_direct(Util.normalize(S2_POINT - C2),
                                      Util.normalize(pos - C2))
            theta = Util.deg_to_rad(theta)
            h = vec2(-RADIUS_INNER * np.cos(theta) + C2.x,
                     -RADIUS_INNER * np.sin(theta) +
                     C2.y)  # orthogonal projection
            tangent = Util.rotate(Util.normalize(C2 - h),
                                  90.0)  # tangent to the circle
        else:
            # 1st part : Turn Left
            theta = Util.angle_direct(
                Util.normalize(S1_POINT - C1),
                Util.normalize(self.car.body.position - C1))
            theta = Util.deg_to_rad(theta)
            h = vec2(RADIUS_INNER * np.cos(theta) + C1.x,
                     RADIUS_INNER * np.sin(theta) +
                     C1.y)  # orthogonal projection
            tangent = Util.rotate(Util.normalize(C1 - h),
                                  -90.0)  # tangent to the circle

        forward = Util.normalize(self.car.body.GetWorldVector((0, 1)))
        orientation = Util.angle_indirect(forward, tangent) / 180.0
        return orientation
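The point h above is the orthogonal projection of the car's position onto the inner circle, computed here via the signed angle theta from the start point. Equivalently, the projection lies on the ray from the circle's center through the car, at distance RADIUS_INNER from the center; a small self-contained sketch of that geometry (illustrative only):

import numpy as np

def project_on_circle(p, c, r):
    # The closest point on a circle of radius r centered at c to a point p
    # lies on the ray from c through p, at distance r from c.
    p, c = np.asarray(p, dtype=float), np.asarray(c, dtype=float)
    return c + r * (p - c) / np.linalg.norm(p - c)

print(project_on_circle((3.0, 4.0), (0.0, 0.0), 2.0))  # -> [1.2 1.6]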
Example #13
    def gizmo(self):
        """
            Draw debug lines: the halfway line (yellow), the radius to the
            active turn's start point (magenta), the car's orthogonal
            projection onto the inner circle (red) and the lane tangent at
            that point (yellow)
        """

        pygame.draw.line(
            self.screen, Color.Yellow,
            world_to_pixels(HALFWAY, SCREEN_HEIGHT, PPM),
            world_to_pixels(HALFWAY - vec2(ROAD_WIDTH, 0), SCREEN_HEIGHT, PPM))

        if self.distance_goal < np.pi * RADIUS_INNER:

            # 2nd part : Turn right
            pygame.draw.line(self.screen, Color.Magenta,
                             world_to_pixels(C2, SCREEN_HEIGHT, PPM),
                             world_to_pixels(S2_POINT, SCREEN_HEIGHT, PPM))

            theta = Util.angle_direct(
                Util.normalize(S2_POINT - C2),
                Util.normalize(self.car.body.position - C2))
            theta = Util.deg_to_rad(theta)
            h = vec2(-RADIUS_INNER * np.cos(theta) + C2.x,
                     -RADIUS_INNER * np.sin(theta) +
                     C2.y)  # orthogonal projection
            pygame.draw.line(self.screen, Color.Red,
                             world_to_pixels(C2, SCREEN_HEIGHT, PPM),
                             world_to_pixels(h, SCREEN_HEIGHT, PPM))

            tangent = Util.rotate(Util.normalize(C2 - h),
                                  90.0)  # tangent to the circle
            pygame.draw.line(self.screen, Color.Yellow,
                             world_to_pixels(h, SCREEN_HEIGHT, PPM),
                             world_to_pixels(h + tangent, SCREEN_HEIGHT, PPM))
        else:
            # 1st part : Turn Left
            pygame.draw.line(self.screen, Color.Magenta,
                             world_to_pixels(C1, SCREEN_HEIGHT, PPM),
                             world_to_pixels(S1_POINT, SCREEN_HEIGHT, PPM))

            theta = Util.angle_direct(
                Util.normalize(S1_POINT - C1),
                Util.normalize(self.car.body.position - C1))
            theta = Util.deg_to_rad(theta)
            h = vec2(RADIUS_INNER * np.cos(theta) + C1.x,
                     RADIUS_INNER * np.sin(theta) +
                     C1.y)  # orthogonal projection
            pygame.draw.line(self.screen, Color.Red,
                             world_to_pixels(C1, SCREEN_HEIGHT, PPM),
                             world_to_pixels(h, SCREEN_HEIGHT, PPM))

            tangent = Util.rotate(Util.normalize(C1 - h),
                                  -90.0)  # tangent to the circle
            pygame.draw.line(self.screen, Color.Yellow,
                             world_to_pixels(h, SCREEN_HEIGHT, PPM),
                             world_to_pixels(h + tangent, SCREEN_HEIGHT, PPM))