예제 #1
0
def main(output_folder_path: Path):
    """Train the occupancy-map end-to-end RL agent in the gym-carla env."""
    # Agent and simulator settings live in the local configuration folder.
    agent_config = AgentConfig.parse_file(
        Path("configurations/agent_configuration.json"))
    carla_config = CarlaConfig.parse_file(
        Path("configurations/carla_configuration.json"))

    env_params = {
        "agent_config": agent_config,
        "carla_config": carla_config,
        "ego_agent_class": RLOccuMapE2ETrainingAgent,
        "max_collision": 5,
    }

    env = gym.make('roar-occu-map-e2e-v0', params=env_params)
    env.reset()

    # Keyword arguments for model construction in setup().
    model_params: dict = dict(
        verbose=1,
        env=env,
        render=True,
        tensorboard_log=(output_folder_path / "tensorboard").as_posix(),
        buffer_size=10000,
        nb_rollout_steps=100,
        # batch_size=16,
        nb_eval_steps=50,
    )
    model, callbacks = setup(model_params, output_folder_path)
    model = model.learn(total_timesteps=int(1e6),
                        callback=callbacks,
                        reset_num_timesteps=False)
예제 #2
0
def run(agent_class,
        agent_config_file_path: Path,
        carla_config_file_path: Path,
        num_laps: int = 10) -> Tuple[float, int, bool]:
    """
    Run the agent along the track and produce a score based on certain metrics.

    Args:
        agent_class: the participant's agent class
        agent_config_file_path: agent configuration path
        carla_config_file_path: carla configuration path
        num_laps: number of laps that the agent should run

    Returns:
        The result of ``compute_score(carla_runner)`` on success;
        ``(0, 0, False)`` if anything fails during initialization or the run.
        (Annotation fixed: the third element is a bool, matching the failure
        return and the sibling ``run`` overload in this file.)
    """
    agent_config: AgentConfig = AgentConfig.parse_file(agent_config_file_path)
    carla_config = CarlaConfig.parse_file(carla_config_file_path)

    # Hard-code the lap count so the run reflects competition requirements.
    agent_config.num_laps = num_laps
    carla_runner = CarlaRunner(carla_settings=carla_config,
                               agent_settings=agent_config,
                               npc_agent_class=PurePursuitAgent,
                               competition_mode=True,
                               lap_count=num_laps)
    try:
        my_vehicle = carla_runner.set_carla_world()
        agent = agent_class(vehicle=my_vehicle, agent_settings=agent_config)
        carla_runner.start_game_loop(agent=agent, use_manual_control=False)
        return compute_score(carla_runner)
    except Exception as e:
        print(f"something bad happened during initialization: {e}")
        carla_runner.on_finish()
        logging.error(f"{e}. Might be a good idea to restart Server")
        return 0, 0, False
예제 #3
0
def run(agent_class, agent_config_file_path: Path,
        carla_config_file_path: Path) -> Tuple[float, int, bool]:
    """
    Drive the supplied agent around the track and score the run.

    Args:
        agent_class: the participant's agent class
        agent_config_file_path: agent configuration path
        carla_config_file_path: carla configuration path

    Returns:
        result of compute_score on success; (0, 0, False) if setup fails
    """
    agent_settings = AgentConfig.parse_file(agent_config_file_path)
    carla_settings = CarlaConfig.parse_file(carla_config_file_path)

    runner = CarlaRunner(carla_settings=carla_settings,
                         agent_settings=agent_settings,
                         npc_agent_class=PurePursuitAgent,
                         competition_mode=True,
                         max_collision=3)
    try:
        vehicle = runner.set_carla_world()
        competitor = agent_class(vehicle=vehicle,
                                 agent_settings=agent_settings)
        runner.start_game_loop(agent=competitor, use_manual_control=True)
        return compute_score(runner)
    except Exception as e:
        print(f"something bad happened during initialization: {e}")
        runner.on_finish()
        logging.error(f"{e}. Might be a good idea to restart Server")
        return 0, 0, False
예제 #4
0
def main():
    """Run the LQR agent in the CARLA simulator."""
    agent_config = AgentConfig.parse_file(
        Path("./ROAR_Sim/configurations/agent_configuration.json"))
    carla_config = CarlaConfig.parse_file(
        Path("./ROAR_Sim/configurations/configuration.json"))

    carla_runner = CarlaRunner(carla_settings=carla_config,
                               agent_settings=agent_config,
                               npc_agent_class=PurePursuitAgent)

    # Data-collection code (currently unnecessary) used to write a CSV to
    # tmp/pid_data.csv with the columns:
    #   px, py, pz, vx, vy, vz, wpx, wpy, wpz, wvx, wvy, wvz, steering, throttle
    # i.e. current car position/velocity, next waypoint position, next
    # waypoint direction relative to the car, plus steering and throttle.

    try:
        my_vehicle = carla_runner.set_carla_world()
        # A PIDAgent was used here previously; LQRAgent is the current choice.
        agent = LQRAgent(vehicle=my_vehicle, agent_settings=agent_config)
        carla_runner.start_game_loop(agent=agent, use_manual_control=False)
    except Exception as e:
        logging.error(f"Something bad happened during initialization: {e}")
        carla_runner.on_finish()
        logging.error(f"{e}. Might be a good idea to restart Server")
예제 #5
0
def main(output_folder_path: Path):
    """Train (or resume training of) the DDPG PID agent in gym-carla."""
    # Simulator + agent settings live next to this script.
    agent_config = AgentConfig.parse_file(
        Path("configurations/agent_configuration.json"))
    carla_config = CarlaConfig.parse_file(
        Path("configurations/carla_configuration.json"))

    env = gym.make('roar-pid-v0', params={
        "agent_config": agent_config,
        "carla_config": carla_config,
        "ego_agent_class": RLPIDAgent,
        "max_collision": 5
    })
    env.reset()

    ddpg_kwargs: dict = dict(
        verbose=1,
        render=True,
        tensorboard_log=(output_folder_path / "tensorboard").as_posix(),
    )
    # Resume from the newest saved model when one exists.
    checkpoint = find_latest_model(output_folder_path)
    if checkpoint is None:
        # Full tensorboard log can take up space quickly.
        model = DDPG(LnMlpPolicy, env=env, **ddpg_kwargs)
    else:
        model = DDPG.load(checkpoint, env=env, **ddpg_kwargs)
        model.render = True
        model.tensorboard_log = (output_folder_path / "tensorboard").as_posix()

    # One CheckpointCallback instance shared between the list and the
    # periodic trigger, matching the original wiring.
    ckpt_cb = CheckpointCallback(
        save_freq=1000, verbose=2,
        save_path=(output_folder_path / "checkpoints").as_posix())
    periodic_ckpt = EveryNTimesteps(n_steps=100, callback=ckpt_cb)
    callbacks = CallbackList([ckpt_cb, periodic_ckpt,
                              LoggingCallback(model=model)])
    model = model.learn(total_timesteps=int(1e10), callback=callbacks,
                        reset_num_timesteps=False)
    model.save(f"pid_ddpg_{datetime.now()}")
예제 #6
0
    def __init__(self):
        """Set up the gym-carla occupancy-map end-to-end training env."""
        # Agent/simulator settings come from the local configuration folder.
        agent_config = AgentConfig.parse_file(
            Path("configurations/agent_configuration.json"))
        carla_config = CarlaConfig.parse_file(
            Path("configurations/carla_configuration.json"))

        super().__init__({
            "agent_config": agent_config,
            "carla_config": carla_config,
            "ego_agent_class": RLOccuMapE2ETrainingAgent,
            "max_collision": 5,
        })

        self.view_size = 200
        self.max_steering_angle = 1
        # Action is (throttle, steering); throttle is bounded to [0.4, 1].
        lo = np.array([0.4, -self.max_steering_angle])
        hi = np.array([1, self.max_steering_angle])
        self.action_space = gym.spaces.Box(low=lo, high=hi, dtype=np.float32)
        # Observation: single-channel view_size x view_size map with
        # values in [0, 1].
        self.observation_space = gym.spaces.Box(
            low=0,
            high=1,
            shape=(self.view_size, self.view_size, 1),
            dtype=np.uint8)
        self.debug_info: OrderedDict = OrderedDict()
        self.prev_location: Optional[Location] = None
        self.prev_next_waypoint: Optional[Location] = None
        self.dist_diff = 0
예제 #7
0
def main(output_folder_path: Path):
    """Train (or resume) the DDPG local-planner agent in gym-carla."""
    agent_config = AgentConfig.parse_file(
        Path("configurations/agent_configuration.json"))
    carla_config = CarlaConfig.parse_file(
        Path("configurations/carla_configuration.json"))

    env = gym.make('roar-local-planner-v0', params={
        "agent_config": agent_config,
        "carla_config": carla_config,
        "ego_agent_class": RLLocalPlannerAgent,
        "max_collision": 5,
    })
    env.reset()

    ddpg_kwargs: dict = dict(
        verbose=1,
        render=True,
        env=env,
        n_cpu_tf_sess=None,
        buffer_size=1000,
        nb_train_steps=50,
        nb_rollout_steps=100,
        # nb_eval_steps=50,
        batch_size=32,
    )
    # Resume from the newest saved model when one exists.
    checkpoint = find_latest_model(Path(output_folder_path))
    if checkpoint is None:
        model = DDPG(CnnPolicy, **ddpg_kwargs)
    else:
        model = DDPG.load(checkpoint, **ddpg_kwargs)

    tensorboard_dir = output_folder_path / "tensorboard"
    ckpt_dir = output_folder_path / "checkpoints"
    for directory in (tensorboard_dir, ckpt_dir):
        directory.mkdir(parents=True, exist_ok=True)
    model.tensorboard_log = tensorboard_dir.as_posix()
    model.render = True

    # One CheckpointCallback shared between the list and the periodic
    # trigger, matching the original wiring.
    ckpt_cb = CheckpointCallback(save_freq=1000,
                                 verbose=2,
                                 save_path=ckpt_dir.as_posix())
    callbacks = CallbackList([
        ckpt_cb,
        EveryNTimesteps(n_steps=100, callback=ckpt_cb),
        LoggingCallback(model=model),
    ])
    model = model.learn(total_timesteps=int(1e10),
                        callback=callbacks,
                        reset_num_timesteps=False)
    model.save(f"local_planner_ddpg_{datetime.now()}")
예제 #8
0
def main():
    """Start the Jetson PID agent's game loop."""
    try:
        agent_config = AgentConfig.parse_file(
            Path("./ROAR_Jetson/configurations/agent_configuration.json"))
        jetson_config = JetsonConfig.parse_file(
            Path("./ROAR_Jetson/configurations/configuration.json"))

        # Hardware prep is best-effort: log and continue on failure.
        try:
            prepare(jetson_config=jetson_config)
        except Exception as e:
            logging.error(f"Ignoring Error during setup: {e}")

        agent = PIDAgent(vehicle=Vehicle(),
                         agent_settings=agent_config,
                         should_init_default_cam=False)
        runner = JetsonRunner(agent=agent, jetson_config=jetson_config)
        runner.start_game_loop(use_manual_control=False)
    except Exception as e:
        print(f"Something bad happened {e}")
예제 #9
0
def main():
    """Run the point-cloud map recording agent in the CARLA simulator."""
    # NOTE(review): agent settings are parsed from a *carla*_configuration
    # file here — verify this path; it may have been meant to point at an
    # agent_configuration.json instead.
    agent_config = AgentConfig.parse_file(
        Path("../ROAR/configurations/carla_configuration.json"))
    carla_config = CarlaConfig.parse_file(
        Path("../ROAR_Sim/configurations/carla_configuration.json"))

    carla_runner = CarlaRunner(carla_settings=carla_config,
                               agent_settings=agent_config,
                               npc_agent_class=PurePursuitAgent)
    try:
        my_vehicle = carla_runner.set_carla_world()
        agent = PointCloudMapRecordingAgent(vehicle=my_vehicle,
                                            agent_settings=agent_config)
        carla_runner.start_game_loop(agent=agent, use_manual_control=False)
    except Exception as e:
        print("Ending abnormally: ", e)
        carla_runner.on_finish()
        # Include the actual error: the old message was an f-string with no
        # placeholders and silently dropped the exception.
        logging.error(f"{e}. Hint: Might be a good idea to restart Server.")
예제 #10
0
def main():
    """Run the forward-only agent on the Jetson under manual control."""
    try:
        agent_config = AgentConfig.parse_file(
            Path("./ROAR_Jetson/configurations/agent_configuration.json"))
        jetson_config = JetsonConfig.parse_file(
            Path("./ROAR_Jetson/configurations/configuration.json"))

        prepare(jetson_config=jetson_config)
        agent = ForwardOnlyAgent(vehicle=Vehicle(),
                                 agent_settings=agent_config)
        runner = JetsonRunner(agent=agent, jetson_config=jetson_config)
        runner.start_game_loop(use_manual_control=True)
    except Exception as e:
        print(f"Something bad happened {e}")
예제 #11
0
def main(output_folder_path: Path):
    """Train (or resume) the v1 local-planner DDPG agent in gym-carla."""
    agent_config = AgentConfig.parse_file(
        Path("configurations/agent_configuration.json"))
    carla_config = CarlaConfig.parse_file(
        Path("configurations/carla_configuration.json"))

    env = gym.make('roar-local-planner-v1', params={
        "agent_config": agent_config,
        "carla_config": carla_config,
        "ego_agent_class": RLLocalPlannerAgent,
        "max_collision": 5,
    })
    env.reset()

    tensorboard_dir, ckpt_dir = prep_dir(output_folder_path)
    ddpg_kwargs: dict = dict(
        verbose=1,
        render=True,
        env=env,
        n_cpu_tf_sess=2,
        buffer_size=10,
        random_exploration=0.1,
        # Full tensorboard log can take up space quickly.
        tensorboard_log=tensorboard_dir.as_posix(),
    )
    # Resume from the newest saved model when one exists.
    checkpoint = find_latest_model(Path(output_folder_path))
    if checkpoint is None:
        model = DDPG(LnMlpPolicy, **ddpg_kwargs)
    else:
        model = DDPG.load(checkpoint, **ddpg_kwargs)

    # One CheckpointCallback shared between the list and the periodic
    # trigger, matching the original wiring.
    ckpt_cb = CheckpointCallback(save_freq=1000,
                                 verbose=2,
                                 save_path=ckpt_dir.as_posix())
    callbacks = CallbackList([
        ckpt_cb,
        EveryNTimesteps(n_steps=100, callback=ckpt_cb),
        LoggingCallback(model=model),
    ])
    model = model.learn(total_timesteps=int(1e10),
                        callback=callbacks,
                        reset_num_timesteps=False)
    model.save(f"local_planner_v1_ddpg_{datetime.now()}")
예제 #12
0
File: runner_sim.py  Project: Trance-0/ROAR
def main():
    """Run the PID agent in the CARLA simulator."""
    agent_settings = AgentConfig.parse_file(
        Path("./ROAR_Sim/configurations/agent_configuration.json"))
    carla_settings = CarlaConfig.parse_file(
        Path("./ROAR_Sim/configurations/configuration.json"))

    runner = CarlaRunner(carla_settings=carla_settings,
                         agent_settings=agent_settings,
                         npc_agent_class=PurePursuitAgent)
    try:
        vehicle = runner.set_carla_world()
        agent = PIDAgent(vehicle=vehicle, agent_settings=agent_settings)
        runner.start_game_loop(agent=agent, use_manual_control=False)
    except Exception as e:
        logging.error(f"Something bad happened during initialization: {e}")
        runner.on_finish()
        logging.error(f"{e}. Might be a good idea to restart Server")
예제 #13
0
File: runner_sim.py  Project: mavabene/ROAR
def main():
    """Start the game loop with the roll-controller (PIDRoll) agent."""
    agent_config = AgentConfig.parse_file(
        Path("./ROAR_Sim/configurations/agent_configuration.json"))
    carla_config = CarlaConfig.parse_file(
        Path("./ROAR_Sim/configurations/configuration.json"))

    carla_runner = CarlaRunner(carla_settings=carla_config,
                               agent_settings=agent_config,
                               npc_agent_class=PurePursuitAgent)
    try:
        my_vehicle = carla_runner.set_carla_world()

        # Alternatives previously tried here: PIDAgent, OccupancyMapAgent,
        # PurePursuitAgent, the JAM1/JAM2/JAM3 variants (bstanley
        # controllers), and RecordingAgent. PIDRollAgent is the
        # roll-controller variant currently in use.
        agent = PIDRollAgent(vehicle=my_vehicle,
                             agent_settings=agent_config)

        # To record new waypoints, the bstanley controller's waypointrecord
        # was saved via np.save("James_waypoints", ...).

        # use_manual_control: True for manual driving, False for autonomous.
        carla_runner.start_game_loop(agent=agent,
                                     use_manual_control=False)

    except Exception as e:
        logging.error(f"Something bad happened during initialization: {e}")
        carla_runner.on_finish()
        logging.error(f"{e}. Might be a good idea to restart Server")
예제 #14
0
    logging.getLogger("matplotlib").setLevel(logging.WARNING)
    logging.getLogger("urllib3").setLevel(logging.WARNING)

    parser = argparse.ArgumentParser()
    parser.add_argument("-auto", action='store_true', help="Enable auto control")
    parser.add_argument("-m", "--mode", choices=choices, help="AR or VR [WARNING not implemented yet!]", default="vr")
    parser.add_argument("-r", "--reconnect", action='store_true', help="Scan QR code to attach phone to PC")
    parser.add_argument("-u", "--use_unity", action='store_true',
                        help="Use unity as rendering and control service")
    parser.add_argument("-g", "--use_glove", help="use glove based controller by supplying its ip address!")
    args = parser.parse_args()

    try:
        agent_config_file_path = Path("ROAR/configurations/iOS/iOS_agent_configuration.json")
        ios_config_file_path = Path("ROAR_iOS/configurations/ios_config.json")
        agent_config = AgentConfig.parse_file(agent_config_file_path)
        ios_config: iOSConfig = iOSConfig.parse_file(ios_config_file_path)
        ios_config.ar_mode = True if args.mode == "ar" else False
        if args.use_glove:
            try:
                is_glove_online(args.use_glove, port=81)
                ios_config.glove_ip_addr = args.use_glove
                ios_config.should_use_glove = True
            except requests.exceptions.ConnectTimeout as e:
                print(f"ERROR. Cannot find Glove at that ip address {args.use_glove}. Shutting down...")
                exit(0)
        else:
            ios_config.should_use_glove = False

        success = False
        if args.reconnect:
예제 #15
0
def main():
    """Tune the car through PitStop, then start the CARLA game loop."""
    carla_config = CarlaConfig.parse_file(
        Path("./ROAR_Sim/configurations/configuration.json"))
    agent_config = AgentConfig.parse_file(
        Path("./ROAR_Sim/configurations/agent_configuration.json"))

    # Pit stop: use the PitStop 'set' functions to tune/fix the car
    # before handing the configurations over.
    pitstop = PitStop(carla_config, agent_config)
    pitstop.set_carla_version(version="0.9.9")
    pitstop.set_carla_sync_mode(False)
    pitstop.set_autopilot_mode(True)
    pitstop.set_car_color(CarlaCarColor(r=255, g=200, b=0, a=255))
    pitstop.set_num_laps(num=1)
    pitstop.set_output_data_folder_path("./data/output")
    pitstop.set_output_data_file_name(
        time.strftime("%Y%m%d-%H%M%S-") + "map-waypoints")
    pitstop.set_max_speed(speed=200)
    pitstop.set_target_speed(speed=30)
    print(agent_config.target_speed, " target speed")
    pitstop.set_steering_boundary(boundary=(-1.0, 1.0))
    pitstop.set_throttle_boundary(boundary=(0, 0.5))
    # Look-ahead waypoint count keyed by speed bracket.
    pitstop.set_waypoints_look_ahead_values(values={
        "60": 5,
        "80": 10,
        "120": 20,
        "180": 50})
    # Speed-scheduled PID gains for the longitudinal (throttle) and
    # latitudinal (steering) controllers.
    pitstop.set_pid_values({
        "longitudinal_controller": {
            "40": {"Kp": 0.8, "Kd": 0.2, "Ki": 0},
            "60": {"Kp": 0.5, "Kd": 0.2, "Ki": 0},
            "150": {"Kp": 0.2, "Kd": 0.1, "Ki": 0.1},
        },
        "latitudinal_controller": {
            "60": {"Kp": 0.8, "Kd": 0.1, "Ki": 0.1},
            "100": {"Kp": 0.6, "Kd": 0.2, "Ki": 0.1},
            "150": {"Kp": 0.5, "Kd": 0.2, "Ki": 0.1},
        },
    })

    # Pass the tuned configurations to Carla and the agent.
    carla_runner = CarlaRunner(carla_settings=carla_config,
                               agent_settings=agent_config,
                               npc_agent_class=PurePursuitAgent)
    try:
        my_vehicle = carla_runner.set_carla_world()

        # Swap in WaypointGeneratigAgent with use_manual_control=True to
        # record waypoints instead of racing with the PID agent.
        agent = PIDAgent(vehicle=my_vehicle, agent_settings=agent_config)
        carla_runner.start_game_loop(agent=agent, use_manual_control=False)

    except Exception as e:
        logging.error(f"Something bad happened during initialization: {e}")
        carla_runner.on_finish()
        logging.error(f"{e}. Might be a good idea to restart Server")