def set_traffic_matrix(self, index):
    traffic_file = self.topo.get_traffic_pattern(index)
    self.input_file = '%s/%s/%s' % (self.conf["input_dir"],
                                    self.conf["topo"], traffic_file)
    if self.conf["id"] != "":
        self.conf["output_dir"] += "/%s" % self.conf["id"]
    check_dir(self.conf["output_dir"])
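
# Nearly every entry point in this section funnels its result directory
# through check_dir/dc_utils.check_dir before writing to it. The helper is
# not defined here; the following is a minimal sketch, assuming it does
# nothing more than create the directory tree on demand:
import os


def check_dir(directory):
    # Recursively create the directory if it does not exist yet.
    # exist_ok guards against races when parallel environments
    # initialize the same output directory at the same time.
    os.makedirs(directory, exist_ok=True)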
def __init__(self, net_man, transport, out_dir):
    self.net_man = net_man
    self.service_procs = []
    self.traffic_procs = []
    self._set_t_type(transport)
    self.out_dir = out_dir
    dc_utils.check_dir(out_dir)
    self._init_services()
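
# The constructor tracks every spawned service and traffic process in
# service_procs and traffic_procs, which implies a matching teardown path
# that is not shown in this section. A hypothetical sketch of what
# terminating one of those lists could look like:
def _kill_procs(self, procs):
    # Hypothetical cleanup helper: terminate each tracked subprocess
    # that is still alive and reap it to avoid zombies.
    for proc in procs:
        if proc.poll() is None:  # still running
            proc.terminate()
            proc.wait()
    procs.clear()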
def init():
    for pattern in TF_PATTERNS:
        for rate in NETWORK_RATES:
            testname = generate_testname(OUTPUT_DIR)
            results_dir = "%s/%s" % (OUTPUT_DIR, testname)
            log.info("Saving results to %s" % results_dir)
            dc_utils.check_dir(results_dir)
            log.info("Dumping configuration in %s" % results_dir)
            dump_config(results_dir, pattern)
            run_tests(results_dir, pattern, rate)
            # Plot the results and save the graphs under the given test name
            plot(results_dir, PLOT_DIR, testname)
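
# generate_testname is referenced above but not defined in this section.
# A minimal sketch, assuming the name only needs to be unique and sortable
# per run under OUTPUT_DIR:
import datetime
import os


def generate_testname(output_dir):
    # Assumed behavior: build a unique name from a timestamp so
    # repeated benchmark runs never collide under output_dir.
    timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    return "%s_%s" % (os.path.basename(output_dir.rstrip("/")), timestamp)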
def main(args=None):
    # Fix a bug introduced by an annoying Google extension
    import absl.logging
    try:
        logging.root.removeHandler(absl.logging._absl_handler)
        absl.logging._warn_preinit_stderr = False
    except Exception as e:
        print("Failed to fix absl logging bug", e)
    logging.basicConfig(format="%(levelname)s:%(message)s",
                        level=logging.INFO)
    args = get_args(args)
    if args is None:
        log.error("Something went wrong while parsing arguments!")
        exit(1)
    log.info("Registering the DC environment...")
    register_env("dc_env", get_env)
    # Configure all ray input parameters based on the arguments
    config = configure_ray(args)
    output_dir = config["env_config"]["output_dir"]
    # Check if the output directory exists before running
    dc_utils.check_dir(output_dir)
    # Dump the configuration
    dc_utils.dump_json(path=output_dir, name="ray_config", data=config)
    log.info("Starting Ray...")
    ts = time.time()
    ray.init(ignore_reinit_error=True,
             logging_level=logging.INFO,
             temp_dir=output_dir,
             plasma_store_socket_name="/tmp/plasma_socket%s" % ts,
             raylet_socket_name="/tmp/raylet_socket%s" % ts)
    log.info("Starting experiment.")
    if args.tune:
        tune_run(config, args.episodes, args.root_output, args.schedule)
    else:
        run_ray(config, args.episodes)
    # Wait until the topology is torn down completely
    # The flaky Mininet stop() call necessitates this
    # This is an unfortunate reality and may conflict with other ovs setups
    log.info("Waiting for environment to complete...")
    wait_for_ovs()
    # Take control back from root
    dc_utils.change_owner(args.root_output)
    # Ray doesn't play nice and prevents proper shutdown sometimes
    ray.shutdown()
    # time.sleep(1)
    # kill_ray()
    log.info("Experiment has completed.")
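
# main() blocks on wait_for_ovs() because Mininet's stop() can return
# before Open vSwitch has actually released its bridges. The helper is not
# part of this section; a sketch of the polling it implies, assuming
# ovs-vsctl is available on the PATH:
import subprocess
import time


def wait_for_ovs(poll_interval=1):
    # Assumed behavior: poll Open vSwitch until no bridges remain,
    # i.e. the Mininet topology has been fully torn down.
    while True:
        bridges = subprocess.check_output(["ovs-vsctl", "list-br"])
        if not bridges.strip():
            return
        time.sleep(poll_interval)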
def record_rate(in_rate, ctrl_rate, sleep, out_dir):
    # Convert to human readable format
    ctrl_rate = ctrl_rate * (in_rate / 1e3)
    in_rate = in_rate / 1e6
    log.info(f"Input: {in_rate} Mbps Expected: {ctrl_rate} kbps")
    log.info(f"Waiting for {sleep} seconds...")
    out_file = f"{out_dir}/{in_rate}mbps_in_{ctrl_rate}kbps_expected"
    dc_utils.check_dir(out_dir)
    ifstat_cmd = "ifstat -b "
    ifstat_cmd += "-i s1-eth1 "   # interface to listen on
    ifstat_cmd += "-q 1 "         # measurement interval
    ifstat_cmd += "%d " % sleep   # measure how long
    return dc_utils.exec_process(ifstat_cmd, out_file=out_file)
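
# dc_utils.exec_process is used fire-and-forget style above and returns a
# process handle. Its implementation lives outside this section; a
# hypothetical sketch, assuming it redirects the command's output into the
# given file:
import subprocess


def exec_process(cmd, out_file=None):
    # Assumed behavior: launch cmd without blocking; when out_file is
    # given, capture stdout/stderr there so the caller can parse it later.
    if out_file is not None:
        out_fd = open(f"{out_file}.out", "w")
        return subprocess.Popen(cmd.split(), stdout=out_fd, stderr=out_fd)
    return subprocess.Popen(cmd.split())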
def init():
    increments = [4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]
    if not ARGS.plot:
        dc_utils.check_dir(OUTPUT_DIR)
        log.info("Registering the DC environment...")
        register_env("dc_env", get_env)
        log.info("Starting Ray...")
        ray.init(num_cpus=1, logging_level=logging.WARN)
        for tf_index, num_hosts in enumerate(increments):
            config = configure_ray(num_hosts, tf_index)
            log.info("Starting experiment.")
            tune_run(config)
            time.sleep(10)
        log.info("Experiment has completed.")
        time.sleep(10)
    plot_scalability_graph(increments, data_dirs, PLOT_DIR,
                           os.path.basename(TESTNAME.strip("/")))
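
# configure_ray here takes a host count and a traffic-file index rather
# than parsed CLI arguments as in main() above. Its exact keys are not
# shown in this section; a sketch under the assumption that it mirrors the
# env_config structure main() reads back out of its config:
def configure_ray(num_hosts, tf_index):
    # Hypothetical: assemble a per-increment Ray/Tune config whose
    # env_config scales the topology and selects the traffic pattern.
    return {
        "env": "dc_env",
        "env_config": {
            "num_hosts": num_hosts,
            "tf_index": tf_index,
            "output_dir": "%s/%d_hosts" % (OUTPUT_DIR, num_hosts),
        },
    }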
def plot(data_dir, plot_dir, name):
    test_config = parse_config(data_dir, "bench_config")
    rl_algos = test_config["rl_algorithms"]
    tcp_algos = test_config["tcp_algorithms"]
    algos = rl_algos + tcp_algos
    runs = test_config["runs"]
    episodes = test_config["episodes"]
    transports = test_config["transport"]
    topo = test_config["topology"]
    min_samples = np.inf
    for transport in transports:
        plt_name = "%s/" % plot_dir
        plt_name += "%s" % name
        dc_utils.check_dir(plt_name)
        plt_name += "/%s" % topo
        plt_name += "_%s" % transport
        plt_name += "_%se" % episodes
        plt_stats = {
            "rewards": {},
            "actions": {},
            "backlog": {},
            "bw_tx": {},
            "olimit": {},
            "drops": {},
        }
        for algo in algos:
            if algo in tcp_algos:
                transport_dir = data_dir + "/tcp_"
            else:
                transport_dir = data_dir + "/%s_" % transport.lower()
            run_list, num_samples = preprocess_data(algo, plt_stats.keys(),
                                                    runs, transport_dir)
            # replace the assumed sample number with actual observed samples
            if num_samples < min_samples:
                min_samples = num_samples
            # average over all runs
            log.info("Computing the average across all runs.")
            for stat in run_list.keys():
                min_len = min([len(ls) for ls in run_list[stat]])
                pruned_list = [ls[:min_len] for ls in run_list[stat]]
                plt_stats[stat][algo] = np.mean(pruned_list, axis=0)
        plot_lineplot(algos, plt_stats, min_samples, plt_name)
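
# plot_lineplot receives the averaged statistics and the minimum common
# sample count; its definition is elsewhere. A minimal matplotlib sketch,
# assuming one panel per statistic and one line per algorithm:
import matplotlib.pyplot as plt


def plot_lineplot(algos, plt_stats, num_samples, plt_name):
    # Hypothetical: draw one subplot per statistic, truncating every
    # series to num_samples so all algorithms share the same x-range.
    stats = list(plt_stats.keys())
    fig, axes = plt.subplots(len(stats), 1, sharex=True,
                             figsize=(8, 2 * len(stats)))
    for ax, stat in zip(axes, stats):
        for algo in algos:
            ax.plot(plt_stats[stat][algo][:num_samples], label=algo)
        ax.set_ylabel(stat)
    axes[0].legend(loc="upper right", ncol=len(algos))
    fig.savefig("%s.pdf" % plt_name, bbox_inches="tight")
    plt.close(fig)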
def __init__(self, conf=None):
    # Copy the defaults so per-instance updates cannot mutate DEFAULT_CONF
    # (a mutable default argument and a shared dict are avoided here)
    self.conf = dict(DEFAULT_CONF)
    if conf:
        self.conf.update(conf)
    # Init one-to-one mapped variables
    self.net_man = None
    self.state_man = None
    self.traffic_gen = None
    self.bw_ctrl = None
    self.sampler = None
    self.input_file = None
    self.terminated = False
    self.reward = RawValue('d', 0)
    # set the id of this environment
    self.short_id = dc_utils.generate_id()
    if self.conf["parallel_envs"]:
        self.conf["topo_conf"]["id"] = self.short_id
    # initialize the topology
    self.topo = TopoFactory.create(self.conf["topo"],
                                   self.conf["topo_conf"])
    # Save the configuration we have, id does not matter here
    dc_utils.dump_json(path=self.conf["output_dir"],
                       name="env_config", data=self.conf)
    dc_utils.dump_json(path=self.conf["output_dir"],
                       name="topo_config", data=self.topo.conf)
    # set the dimensions of the state matrix
    self._set_gym_matrices()
    # Set the active traffic matrix
    self._set_traffic_matrix(self.conf["tf_index"],
                             self.conf["input_dir"], self.topo)
    # each unique id has its own subfolder
    if self.conf["parallel_envs"]:
        self.conf["output_dir"] += f"/{self.short_id}"
    # check if the directory we are going to work with exists
    dc_utils.check_dir(self.conf["output_dir"])
    # handle unexpected exit scenarios gracefully
    atexit.register(self.close)
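
# For orientation, this is roughly how the constructor above is exercised.
# The class name DCEnv and the config keys shown are assumptions inferred
# from the fields the constructor reads, not confirmed by this section:
conf = {
    "topo": "dumbbell",      # topology key passed to TopoFactory
    "tf_index": 0,           # which traffic matrix to load
    "parallel_envs": False,  # single environment, no per-id subfolder
}
env = DCEnv(conf)
# The gym-style matrices set up by _set_gym_matrices() imply the usual loop:
# obs = env.reset(); obs, reward, done, info = env.step(action)
env.close()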
def init():
    output_dir = ARGS.output_dir + "/" + ARGS.agent
    dc_utils.check_dir(output_dir)
    test_run(INPUT_DIR, output_dir, ARGS.env, ARGS.topo)