Example #1
def drive(args):
    with open(args.conf.strip(), 'r') as f:
        config = json.load(f)

    modules = load_modules(config['models'])
    model = load_agent('imitation_learning_agent').load('lstm-2.h5')

    agent = ControlAgent(model, modules)
    experiment_suite = CoRL2017('Town01')
    run_driving_benchmark(agent, experiment_suite)
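This excerpt omits its imports. A minimal sketch of what it relies on, assuming the CARLA 0.8.x PythonClient layout (load_modules, load_agent and ControlAgent are helpers from the surrounding project, not part of CARLA):

import json

# Benchmark runner and the CoRL 2017 experiment suite shipped with the
# CARLA 0.8.x PythonClient (assumed here, not shown in the excerpt)
from carla.driving_benchmark import run_driving_benchmark
from carla.driving_benchmark.experiment_suites import CoRL2017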
Example #2
# download dataset if it doesn't exist
if not os.path.exists(agent_params.memory.load_memory_from_file_path):
    screen.log_title("The CARLA dataset is not present in the following path: {}"
                     .format(agent_params.memory.load_memory_from_file_path))
    result = screen.ask_yes_no("Do you want to download it now?")
    if result:
        create_dataset(None, "./datasets/carla_train_set_replay_buffer.p")
    else:
        screen.error("Please update the path to the CARLA dataset in the CARLA_CIL preset", crash=True)


###############
# Environment #
###############
env_params = CarlaEnvironmentParameters()
env_params.level = 'town1'
env_params.cameras = ['CameraRGB']
env_params.camera_height = 600
env_params.camera_width = 800
env_params.separate_actions_for_throttle_and_brake = True
env_params.allow_braking = True
env_params.quality = CarlaEnvironmentParameters.Quality.EPIC
env_params.experiment_suite = CoRL2017('Town01')

vis_params = VisualizationParameters()
vis_params.video_dump_methods = [SelectedPhaseOnlyDumpMethod(RunPhase.TEST)]
vis_params.dump_mp4 = True

graph_manager = BasicRLGraphManager(agent_params=agent_params, env_params=env_params,
                                    schedule_params=schedule_params, vis_params=vis_params)
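The parameter and logging helpers above come from Intel's RL Coach, so this snippet reads like a Coach preset for CARLA conditional imitation learning. A sketch of the imports such a preset would need, assuming Coach's usual module layout plus the CARLA 0.8.x client for the experiment suite (module paths are assumptions, not taken from the excerpt):

from rl_coach.base_parameters import VisualizationParameters
from rl_coach.core_types import RunPhase, SelectedPhaseOnlyDumpMethod
from rl_coach.environments.carla_environment import CarlaEnvironmentParameters
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.logger import screen
from carla.driving_benchmark.experiment_suites import CoRL2017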
Example #3
    argparser.add_argument(
        '--log_name',
        metavar='T',
        default='test',
        help='The name of the log file to be created by the scripts')

    argparser.add_argument(
        '--avoid-stopping',
        default=True,
        action='store_false',
        help='Uses the speed prediction branch to avoid unwanted agent stops')
    argparser.add_argument(
        '--continue-experiment',
        action='store_true',
        help='If you want to continue the experiment with the given log name')

    args = argparser.parse_args()

    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)

    logging.info('listening to server %s:%s', args.host, args.port)

    agent = ImitationLearning(args.city_name, args.avoid_stopping)
    if args.test_name == 'CORL2017':
        corl = CoRL2017(args.city_name)
    else:
        corl = trainingData(args.city_name)
    # Now actually run the driving_benchmark
    run_driving_benchmark(agent, corl, args.city_name, args.log_name,
                          args.continue_experiment, args.host, args.port)
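Note that --avoid-stopping is declared with default=True and action='store_false', so passing the flag actually sets args.avoid_stopping to False and turns the speed-prediction branch off; the behaviour described in the help text is what you get when the flag is omitted. A less surprising way to expose the same switch (a hypothetical variant, not the original script's interface) is an explicit disable flag:

    argparser.add_argument(
        '--no-avoid-stopping',
        dest='avoid_stopping',
        action='store_false',
        help='Disable the speed prediction branch that avoids unwanted agent stops')
    argparser.set_defaults(avoid_stopping=True)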
Example #4
    args = argparser.parse_args()
    if args.debug:
        log_level = logging.DEBUG
    elif args.verbose:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING

    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
    logging.info('listening to server %s:%s', args.host, args.port)

    # We instantiate a forward agent, a simple policy that just sets the
    # acceleration to 0.9 and the steering to zero
    agent = L5Agent()

    # We instantiate an experiment suite. Basically a set of experiments
    # that are going to be evaluated on this benchmark.
    if args.corl_2017:
        experiment_suite = CoRL2017(args.city_name)
    else:
        print('WARNING: running the basic driving benchmark; to run the CoRL 2017'
              ' experiment suite, use'
              ' python driving_benchmark_example.py --corl-2017')
        experiment_suite = BasicExperimentSuite(args.city_name)

    # Now actually run the driving_benchmark
    run_driving_benchmark(agent, experiment_suite, args.city_name,
                          args.log_name, args.continue_experiment,
                          args.host, args.port)
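For reference, the positional and keyword arguments passed here line up with the benchmark runner shipped in the CARLA 0.8.x PythonClient, whose signature is roughly the following (reconstructed from that release; exact defaults may differ):

def run_driving_benchmark(agent, experiment_suite, city_name='Town01',
                          log_name='Test', continue_experiment=False,
                          host='127.0.0.1', port=2000):
    ...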
Example #5
        help='The name of the model for writing in the reports')
    argparser.add_argument('-p',
                           '--path',
                           metavar='P',
                           default='test',
                           help='Path to all log files')

    args = argparser.parse_args()

    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)

    logging.info('starting the calculations %s',
                 "0")  # TODO: add the actual time instead of zero

    experiment_suite = CoRL2017("Town01")

    metrics_object = Metrics(experiment_suite.metrics_parameters,
                             experiment_suite.dynamic_tasks)

    # Improve readability by adding a weather dictionary
    weather_name_dict = {
        1: 'Clear Noon',
        3: 'After Rain Noon',
        6: 'Heavy Rain Noon',
        8: 'Clear Sunset',
        4: 'Cloudy After Rain',
        14: 'Soft Rain Sunset'
    }

    # names for all the test logs
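The ids in weather_name_dict follow the CoRL 2017 split: 1, 3, 6 and 8 are the training weathers and 4 and 14 are the held-out test weathers, so a report keyed by these ids can label every evaluated condition by name.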
Example #6
    def __init__(self,
                 num_speedup_steps=30,
                 require_explicit_reset=True,
                 is_render_enabled=False,
                 early_termination_enabled=False,
                 run_offscreen=False,
                 save_screens=False,
                 port=2000,
                 gpu=0,
                 discrete_control=True,
                 kill_when_connection_lost=True,
                 city_name="Town01",
                 channel_last=True):
        EnvironmentWrapper.__init__(self, is_render_enabled, save_screens)

        print("port:", port)

        self.episode_max_time = 1000000
        self.allow_braking = True
        self.log_path = os.path.join(DEFAULT_CARLA_LOG_DIR, "CarlaLogs.txt")
        self.num_speedup_steps = num_speedup_steps
        self.is_game_ready_for_input = False
        self.run_offscreen = run_offscreen
        self.kill_when_connection_lost = kill_when_connection_lost
        # server configuration

        self.port = port
        self.gpu = gpu
        self.host = 'localhost'
        self.level = 'town1'
        self.map = CarlaLevel().get(self.level)

        # experiment = basic_experiment_suite.BasicExperimentSuite(city_name)
        experiment = CoRL2017(city_name)
        self.experiments = experiment.get_experiments()
        self.experiment_type = 0
        self.planner = Planner(city_name)

        self.car_speed = 0
        self.is_game_setup = False  # Will be true only when setup_client_and_server() is called, either explicitly, or by reset()

        # action space
        self.discrete_controls = discrete_control
        self.action_space_size = 3
        self.action_space_high = np.array([1, 1, 1])
        self.action_space_low = np.array([-1, -1, -1])
        self.action_space_abs_range = np.maximum(
            np.abs(self.action_space_low), np.abs(self.action_space_high))
        self.steering_strength = 0.35
        self.gas_strength = 1.0
        self.brake_strength = 0.6
        self.actions = {
            0: [0., 0.],
            1: [0., -self.steering_strength],
            2: [0., self.steering_strength],
            3: [self.gas_strength - 0.15, 0.],
            4: [-self.brake_strength, 0],
            5: [self.gas_strength - 0.3, -self.steering_strength],
            6: [self.gas_strength - 0.3, self.steering_strength],
            7: [-self.brake_strength, -self.steering_strength],
            8: [-self.brake_strength, self.steering_strength]
        }
        self.actions_description = [
            'NO-OP', 'TURN_LEFT', 'TURN_RIGHT', 'GAS', 'BRAKE',
            'GAS_AND_TURN_LEFT', 'GAS_AND_TURN_RIGHT', 'BRAKE_AND_TURN_LEFT',
            'BRAKE_AND_TURN_RIGHT'
        ]
        if discrete_control:
            self.action_space = Discrete(len(self.actions))
        else:
            self.action_space = Box(low=self.action_space_low,
                                    high=self.action_space_high)
        self.observation_space = Box(low=-np.inf,
                                     high=np.inf,
                                     shape=[88, 200, 3])

        # measurements
        self.measurements_size = (1, )

        self.pre_image = None
        self.first_debug = True
        self.channel_last = channel_last
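Each entry in self.actions is a [throttle_or_brake, steering] pair: a positive first element is throttle, a negative one is brake, and the second element is the steering command in [-1, 1]. A small sketch of how such a pair could be turned into a CARLA 0.8.x control message (hypothetical helper; the wrapper's real step() logic is not shown in this excerpt):

from carla.client import VehicleControl  # CARLA 0.8.x client

def action_to_control(env, action_idx):
    # Split the stored pair into throttle/brake and steering
    throttle_or_brake, steer = env.actions[action_idx]
    control = VehicleControl()
    control.steer = steer
    control.throttle = max(throttle_or_brake, 0.0)
    control.brake = max(-throttle_or_brake, 0.0)
    return control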