# ---------------------------------------------------------------------------
# Trainer configuration for the Carla obstacle-avoidance experiment.
# Populates the shared `config` object (created earlier in this file) with
# run settings, checkpoint/resume options, exploration tweaks, and the
# output directory for result data and graphs.
# ---------------------------------------------------------------------------
config.environment = gym.make(env_title)
config.num_episodes_to_run = 2000
config.show_solution_score = False
config.visualise_individual_results = True
config.visualise_overall_agent_results = True
config.standard_deviation_results = 1.0
config.runs_per_agent = 1
config.use_GPU = True
config.overwrite_existing_results_file = False
config.randomise_random_seed = True
config.save_model = True
config.log_loss = False
# Timestamp that namespaces this run's logs and checkpoints.
config.log_base = time.strftime("%Y%m%d%H%M%S", time.localtime())
config.save_model_freq = 300  # save a model checkpoint every 300 episodes
config.retrain = False
config.resume = False
# Raw string: the original literal relied on invalid escapes (\m, \R, \M, \D)
# staying literal, which is deprecated and breaks for components starting
# with r/n/t. The runtime value is unchanged.
config.resume_path = (r'C:\my_project\RL-based-decision-making-in-Carla'
                      r'\results\Models\DDQN with Prioritised Replay'
                      r'\DDQN with Prioritised Replay_1500.model')
config.backbone_pretrain = False
config.force_explore_mode = True
# When the std of the rolling score over the last 10-episode window is
# smaller than this value, start forced-exploration mode.
config.force_explore_stare_e = 0.4
# Force exploration only when the current score exceeds
# force_explore_rate * max(rolling_score[-10:]).
config.force_explore_rate = 0.95

## data and graphs save dir ##
data_results_root = os.path.join(
    os.path.dirname(__file__),
    "data_and_graphs", "carla_obstacle_avoidance",
    config.log_base)
# Append underscores until the directory name is unique, then create it.
while os.path.exists(data_results_root):
    data_results_root += '_'
os.makedirs(data_results_root)
config.file_to_save_data_results = os.path.join(data_results_root, "data.pkl")
config.file_to_save_results_graph = os.path.join(data_results_root, "data.png")
# ---------------------------------------------------------------------------
# Trainer configuration (retrain variant) for the Carla obstacle-avoidance
# experiment: resumes training from a saved rolling-score checkpoint.
# Populates the shared `config` object (created earlier in this file) and
# creates a unique output directory for this run.
# ---------------------------------------------------------------------------
config.environment = gym.make(env_title)
config.num_episodes_to_run = 2000
config.show_solution_score = False
config.visualise_individual_results = True
config.visualise_overall_agent_results = True
config.standard_deviation_results = 1.0
config.runs_per_agent = 1
config.use_GPU = True
config.overwrite_existing_results_file = False
config.randomise_random_seed = True
config.save_model = True
config.log_loss = False
# Timestamp that namespaces this run's logs and checkpoints.
config.log_base = time.strftime("%Y%m%d%H%M%S", time.localtime())
config.save_model_freq = 300  # save a model checkpoint every 300 episodes
config.retrain = True
config.resume = False
# Raw string: the original literal mixed escaped (\\) and invalid (\M, \O, \D)
# backslashes, which only worked by accident. The runtime value is unchanged.
config.resume_path = (r'E:\reinforcement-learning-based-driving-decision-in-Carla'
                      r'\results\Models\ObstacleAvoidance-v0'
                      r'\DDQN with Prioritised Replay\20200611150242'
                      r'\rolling_score_68.0417.model')
config.backbone_pretrain = False
config.force_explore_mode = True
# When the std of the rolling score over the last 10-episode window is
# smaller than this value, start forced-exploration mode.
config.force_explore_stare_e = 0.2
# Force exploration only when the current score exceeds
# force_explore_rate * max(rolling_score[-10:]).
config.force_explore_rate = 0.95

## data and graphs save dir ##
data_results_root = os.path.join(
    os.path.dirname(__file__),
    "data_and_graphs", "carla_obstacle_avoidance",
    config.log_base)
# Append underscores until the directory name is unique, then create it.
while os.path.exists(data_results_root):
    data_results_root += '_'
os.makedirs(data_results_root)