def test_get_webui(shutdown_only):
    """Start Ray with the dashboard enabled and verify it serves node info.

    Checks that the URL reported by ``ray.init`` matches
    ``ray.get_webui_url()``, then polls the dashboard's ``/api/node_info``
    endpoint until it responds (or a 30 s budget elapses). On timeout the
    dashboard's stdout/stderr logs are included in the raised exception to
    aid debugging.
    """
    addresses = ray.init(include_webui=True, num_cpus=1)
    webui_url = addresses["webui_url"]
    assert ray.get_webui_url() == webui_url
    assert re.match(r"^(localhost|\d+\.\d+\.\d+\.\d+):\d+$", webui_url)
    start_time = time.time()
    while True:
        try:
            # Per-request timeout so one hung HTTP call cannot stall the
            # test past the overall 30 s budget enforced below.
            node_info = requests.get(
                "http://" + webui_url + "/api/node_info", timeout=5).json()
            break
        except requests.exceptions.RequestException:
            if time.time() > start_time + 30:
                error_log = None
                out_log = None
                # Best-effort log collection: the dashboard may have died
                # before creating its log files, and a FileNotFoundError
                # here would mask the real (timeout) failure.
                try:
                    with open(
                            "{}/logs/dashboard.out".format(
                                addresses["session_dir"]), "r") as f:
                        out_log = f.read()
                    with open(
                            "{}/logs/dashboard.err".format(
                                addresses["session_dir"]), "r") as f:
                        error_log = f.read()
                except OSError:
                    pass
                raise Exception(
                    "Timed out while waiting for dashboard to start. "
                    "Dashboard output log: {}\n"
                    "Dashboard error log: {}\n".format(out_log, error_log))
            # Brief pause between attempts instead of busy-spinning on
            # connection errors while the dashboard boots.
            time.sleep(0.5)
    assert node_info["error"] is None
    assert node_info["result"] is not None
    assert isinstance(node_info["timestamp"], float)
def test_get_webui(shutdown_only):
    """Verify the dashboard URL is well-formed and the API becomes reachable.

    The reported URL must match ``ray.get_webui_url()`` and look like
    ``http://<ipv4>:8080``. The ``/api/node_info`` endpoint is polled for up
    to 30 seconds, after which the test fails.
    """
    addresses = ray.init(include_webui=True, num_cpus=1)
    webui_url = addresses["webui_url"]
    assert ray.get_webui_url() == webui_url
    assert re.match(r"^http://\d+\.\d+\.\d+\.\d+:8080$", webui_url)

    deadline = time.time() + 30
    while True:
        try:
            response = requests.get(webui_url + "/api/node_info")
            node_info = response.json()
        except requests.exceptions.ConnectionError:
            # Dashboard not up yet; keep retrying until the deadline.
            if time.time() > deadline:
                raise Exception(
                    "Timed out while waiting for dashboard to start.")
        else:
            break

    assert node_info["error"] is None
    assert node_info["result"] is not None
    assert isinstance(node_info["timestamp"], float)
def test_get_webui(shutdown_only):
    """Check the token-authenticated dashboard URL and poll its node-info API.

    The URL returned by ``ray.init`` must match ``ray.get_webui_url()`` and
    carry a ``token=`` query string. The token is forwarded to the
    ``api/node_info`` endpoint, which is polled for up to 30 seconds.
    """
    addresses = ray.init(include_webui=True, num_cpus=1)
    webui_url = addresses["webui_url"]
    assert ray.get_webui_url() == webui_url

    # Split the URL into its base and the authentication query string.
    base, token = webui_url.split("?")
    assert token.startswith("token=")

    deadline = time.time() + 30
    while True:
        try:
            node_info = requests.get(f"{base}api/node_info?{token}").json()
        except requests.exceptions.ConnectionError:
            # Dashboard still starting; retry until the deadline passes.
            if time.time() > deadline:
                raise Exception(
                    "Timed out while waiting for dashboard to start.")
        else:
            break

    assert node_info["error"] is None
    assert node_info["result"] is not None
    assert isinstance(node_info["timestamp"], float)
while True: current_payoff_matrix_size = size_checker.get_size_of_current_payoff_table( ) if current_payoff_matrix_size < expected_payoff_matrix_size: logger.info( f"waiting for payoff matrix to reach size {expected_payoff_matrix_size} (currently {current_payoff_matrix_size})..." ) time.sleep(5) # elif current_payoff_matrix_size > expected_payoff_matrix_size: # raise ValueError(f"payoff matrix is now larger than expected (expected {expected_payoff_matrix_size}, currently {current_payoff_matrix_size})") else: break expected_payoff_matrix_size = current_payoff_matrix_size + 1 ray.init(address=os.getenv('RAY_HEAD_NODE'), ignore_reinit_error=True) logger.info("Ray Web UI at {}".format(ray.get_webui_url())) base_experiment_name = f"{CLOUD_PREFIX}learner_{POKER_GAME_VERSION}_sac_arch1_psro_sequential_explore_coeff_{PSRO_EXPLORATION_COEFF}" full_experiment_name = f"{base_experiment_name}_{gethostname()}_pid_{os.getpid()}_{datetime_str()}" experiment_save_dir = os.path.join(DEFAULT_RESULTS_DIR, full_experiment_name) def init_static_policy_distribution_after_trainer_init_callback( trainer): trainer.storage_client = connect_storage_client() logger.info("Initializing trainer manager interface") trainer.manager_interface = LearnerManagerInterface( server_host=MANAGER_SEVER_HOST, port=MANAGER_PORT, worker_id=full_experiment_name,
#### Plot the simulation results ################################################################### logger.plot() #################################################################################################### #### Part 2 of 2 of _devp.y: training and testing MARLFlockAviary as an RLlib MultiAgentEnv ######## #################################################################################################### elif PART == 2: #### WIP notes ##################################################################################### #### use ENV_STATE: github.com/ray-project/ray/blob/master/rllib/examples/env/two_step_game.py ##### #### Initialize Ray Tune ########################################################################### ray.shutdown() ray.init(ignore_reinit_error=True) print("Dashboard URL: http://{}".format(ray.get_webui_url())) #### Register the environment ###################################################################### register_env( "marl-flock-aviary-v0", lambda _: MARLFlockAviary(drone_model=DRONE, num_drones=NUM_DRONES, physics=PHYSICS, freq=SIMULATION_FREQ_HZ)) #### Set up the trainer's config ################################################################### config = ppo.DEFAULT_CONFIG.copy() config["num_workers"] = 0 config["env"] = "marl-flock-aviary-v0" #### Unused env to extract correctly sized action and observation spaces ########################### unused_env = MARLFlockAviary(num_drones=NUM_DRONES)