Example #1
def main():
    args = parse_args()
    if args.trace_file and args.trace_file.endswith('.json'):
        trace = Trace.load_from_file(args.trace_file)
    elif args.trace_file and args.trace_file.endswith('.log'):
        trace = Trace.load_from_pantheon_file(args.trace_file, 0, 50, 500)
    else:
        trace = None

    for log_idx, log_file in enumerate(args.log_file):
        if not os.path.exists(log_file):
            continue
        pkt_log = PacketLog.from_log_file(log_file, 500)
        cc = os.path.splitext(os.path.basename(log_file))[0].split('_')[0]

        sending_rate_ts, sending_rate = pkt_log.get_sending_rate()
        throughput_ts, throughput = pkt_log.get_throughput()
        rtt_ts, rtt = pkt_log.get_rtt()
        # queue_delay_ts, queue_delay = pkt_log.get_queue_delay()
        pkt_loss = pkt_log.get_loss_rate()
        avg_tput = pkt_log.get_avg_throughput()
        avg_sending_rate = pkt_log.get_avg_sending_rate()
        avg_lat = pkt_log.get_avg_latency()
        reward = pkt_log.get_reward("", None)
        normalized_reward = pkt_log.get_reward("", trace)
        plot(trace, throughput_ts, throughput, sending_rate_ts, sending_rate,
             avg_tput, avg_sending_rate, rtt_ts, rtt, avg_lat, pkt_loss,
             reward, normalized_reward, args.save_dir, cc)
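
A hedged sketch of the argument parser this script presumably pairs with: the flag names below are inferred from the attributes used in main() (args.log_file, args.trace_file, args.save_dir) and are an assumption, not the repo's confirmed interface.

import argparse

def parse_args():
    # Hypothetical flags matching the attribute names used in main() above.
    parser = argparse.ArgumentParser(
        description="Plot sending rate, throughput, and RTT from packet logs.")
    parser.add_argument('--log-file', nargs='+', required=True,
                        help="one or more packet log files")
    parser.add_argument('--trace-file', default=None,
                        help="optional .json or .log trace for reward normalization")
    parser.add_argument('--save-dir', default='.',
                        help="directory where plots are saved")
    return parser.parse_args()
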
Example #2
def main():
    args = parse_args()
    assert (args.pretrained_model_path is None
            or args.pretrained_model_path.endswith(".ckpt"))
    os.makedirs(args.save_dir, exist_ok=True)
    save_args(args)
    set_seed(args.seed + COMM_WORLD.Get_rank() * 100)
    nprocs = COMM_WORLD.Get_size()

    # Initialize model and agent policy
    aurora = Aurora(args.seed + COMM_WORLD.Get_rank() * 100, args.save_dir,
                    int(7200 / nprocs), args.pretrained_model_path,
                    tensorboard_log=args.tensorboard_log)
    # training_traces, validation_traces,
    training_traces = []
    val_traces = []
    if args.train_trace_file:
        with open(args.train_trace_file, 'r') as f:
            for line in f:
                line = line.strip()
                if args.dataset == 'pantheon':
                    queue = 100  # dummy value
                    # if "ethernet" in line:
                    #     queue = 500
                    # elif "cellular" in line:
                    #     queue = 50
                    # else:
                    #     queue = 100
                    training_traces.append(Trace.load_from_pantheon_file(
                        line, queue=queue, loss=0))
                elif args.dataset == 'synthetic':
                    training_traces.append(Trace.load_from_file(line))
                else:
                    raise ValueError

    if args.val_trace_file:
        with open(args.val_trace_file, 'r') as f:
            for line in f:
                line = line.strip()
                if args.dataset == 'pantheon':
                    queue = 100  # dummy value
                    # if "ethernet" in line:
                    #     queue = 500
                    # elif "cellular" in line:
                    #     queue = 50
                    # else:
                    #     queue = 100
                    val_traces.append(Trace.load_from_pantheon_file(
                        line, queue=queue, loss=0))
                elif args.dataset == 'synthetic':
                    val_traces.append(Trace.load_from_file(line))
                else:
                    raise ValueError
    print(args.randomization_range_file)

    aurora.train(args.randomization_range_file,
                 args.total_timesteps, tot_trace_cnt=args.total_trace_count,
                 tb_log_name=args.exp_name, validation_flag=args.validation,
                 training_traces=training_traces,
                 validation_traces=val_traces)
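
Both trace-list files are read one path per line, as the loops above show. A minimal sketch that produces such a list file (the traces/ directory layout is hypothetical):

import glob
import os

# Collect synthetic trace paths into a newline-separated list file.
with open('train_traces.txt', 'w') as f:
    for path in sorted(glob.glob(os.path.join('traces', 'trace_*.json'))):
        f.write(path + '\n')
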
Example #3
def main():
    args = parse_args()
    for log_file in args.log_file:
        if not os.path.exists(log_file):
            continue
        if not args.trace_file:
            trace = None
        elif args.trace_file.endswith('.json'):
            trace = Trace.load_from_file(args.trace_file)
        elif args.trace_file.endswith('.log'):
            trace = Trace.load_from_pantheon_file(args.trace_file,
                                                  loss=0,
                                                  queue=10)
        else:
            trace = None
        cc = os.path.basename(log_file).split('_')[0]
        plot(trace, log_file, args.save_dir, cc)
Example #4
def load_from_dir(trace_dir: str):
    files = sorted(glob.glob(os.path.join(trace_dir, 'trace_*.json')))
    traces = []
    for file in files:
        traces.append(Trace.load_from_file(file))
    dataset = SyntheticDataset(len(traces), None)
    dataset.traces = traces
    return dataset
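
A usage sketch, assuming load_from_dir is a staticmethod on SyntheticDataset (the self-less signature suggests so) and that the directory below exists (hypothetical path):

# Load every trace_*.json under the directory into a dataset.
dataset = SyntheticDataset.load_from_dir('data/synthetic_traces')
print(len(dataset.traces), 'traces loaded')
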
Example #5
def main():
    args = parse_args()
    bbr = BBR(False)
    cubic = Cubic(False)

    validation_traces = []
    save_dirs = []
    for i in range(20):
        trace_file = os.path.join(args.save_dir, 'validation_traces',
                                  "trace_{}.json".format(i))
        if not os.path.exists(trace_file):
            continue
        validation_traces.append(Trace.load_from_file(trace_file))

        save_dir = os.path.join(args.save_dir, 'validation_traces',
                                "trace_{}".format(i))
        os.makedirs(save_dir, exist_ok=True)
        save_dirs.append(save_dir)
    bbr_trace_rewards = bbr.test_on_traces(validation_traces, save_dirs, False)
    cubic_trace_rewards = cubic.test_on_traces(validation_traces, save_dirs,
                                               False)
    bbr_rewards = [mi_level_reward for mi_level_reward, _ in bbr_trace_rewards]
    cubic_rewards = [
        mi_level_reward for mi_level_reward, _ in cubic_trace_rewards
    ]

    for log_file in args.log_file:
        plt.figure()
        model_name = log_file.split('/')[-2]
        plt.title(model_name)
        df = pd.read_csv(log_file, sep='\t')
        best_idx = df['mean_validation_reward'].argmax()
        best_step = int(df['num_timesteps'][best_idx])
        t_used = df['tot_t_used(min)'][best_idx]
        best_reward = df['mean_validation_reward'].max()
        best_model_path = os.path.join(
            os.path.dirname(log_file),
            "model_step_{}.ckpt.meta".format(best_step))

        plt.plot(
            df['num_timesteps'],
            df['mean_validation_reward'],
            'o-',
            label="best_reward: {:.2f}, best step: {}, used {:.2f}min".format(
                best_reward, int(best_step), t_used))
        plt.axhline(y=np.mean(bbr_rewards), c='r', label='BBR')
        plt.axhline(y=np.mean(cubic_rewards), c='k', label='Cubic')
        plt.xlabel('Num steps')
        plt.ylabel('Validation Reward')
        plt.legend()
        assert os.path.exists(best_model_path)
        print(best_model_path.replace(".meta", ""))
        if args.save_dir:
            os.makedirs(args.save_dir, exist_ok=True)
            plt.savefig(
                os.path.join(args.save_dir,
                             '{}_val_curve.png'.format(model_name)))
        plt.close()
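
pd.read_csv(log_file, sep='\t') implies each validation log is tab-separated with at least the three columns read above. A minimal compatible file looks like this (the values are made up):

num_timesteps	mean_validation_reward	tot_t_used(min)
7200	105.31	4.20
14400	131.68	8.41
21600	128.23	12.63
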
Example #6
def get_reward(self, trace_file: str, trace=None) -> float:
    if trace_file and trace_file.endswith('.json'):
        trace = Trace.load_from_file(trace_file)
    elif trace_file and trace_file.endswith('.log'):
        trace = Trace.load_from_pantheon_file(trace_file, 0, 50, 500)
    loss = self.get_loss_rate()
    if trace is None:
        # original reward
        return pcc_aurora_reward(
            self.get_avg_throughput() * 1e6 / BITS_PER_BYTE / BYTES_PER_PACKET,
            self.get_avg_latency() / 1e3, loss)
    # normalized reward
    return pcc_aurora_reward(
        self.get_avg_throughput() * 1e6 / BITS_PER_BYTE / BYTES_PER_PACKET,
        self.get_avg_latency() / 1e3, loss,
        trace.avg_bw * 1e6 / BITS_PER_BYTE / BYTES_PER_PACKET,
        trace.min_delay * 2 / 1e3)
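
The unit conversions above are easy to misread. Assuming BITS_PER_BYTE = 8 and BYTES_PER_PACKET = 1500 (MTU-sized packets; an assumption consistent with how the constants are used here), throughput in Mbps becomes packets per second, latency in ms becomes seconds, and min_delay * 2 turns a one-way delay into an RTT:

BITS_PER_BYTE = 8        # assumed value
BYTES_PER_PACKET = 1500  # assumed MTU-sized packet

tput_mbps = 12.0
tput_pkts_per_sec = tput_mbps * 1e6 / BITS_PER_BYTE / BYTES_PER_PACKET  # 1000.0
min_one_way_delay_ms = 40.0
min_rtt_sec = min_one_way_delay_ms * 2 / 1e3  # 0.08
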
Example #7
def load_from_file(trace_file: str):
    traces = []
    with open(trace_file, 'r') as f:
        for line in f:
            line = line.strip()
            traces.append(Trace.load_from_file(line))
    dataset = SyntheticDataset(len(traces), None)
    dataset.traces = traces
    return dataset
Example #8
def main():
    args = parse_args()
    set_seed(args.seed)
    if args.save_dir:
        os.makedirs(args.save_dir, exist_ok=True)

    if args.trace_file is not None and args.trace_file.endswith('.json'):
        test_traces = [Trace.load_from_file(args.trace_file)]
    elif args.trace_file is not None and args.trace_file.endswith('.log'):
        test_traces = [
            Trace.load_from_pantheon_file(args.trace_file, args.delay,
                                          args.loss, args.queue)
        ]
    elif args.config_file is not None:
        test_traces = generate_traces(args.config_file,
                                      1,
                                      args.duration,
                                      constant_bw=not args.time_variant_bw)
    else:
        test_traces = [
            generate_trace((args.duration, args.duration),
                           (args.bandwidth, args.bandwidth),
                           (args.delay, args.delay), (args.loss, args.loss),
                           (args.queue, args.queue), (60, 60), (60, 60),
                           constant_bw=not args.time_variant_bw)
        ]
    # print(test_traces[0].bandwidths)

    aurora = Aurora(seed=args.seed,
                    timesteps_per_actorbatch=10,
                    log_dir=args.save_dir,
                    pretrained_model_path=args.model_path,
                    delta_scale=args.delta_scale)
    results, pkt_logs = aurora.test_on_traces(test_traces, [args.save_dir])

    # test_traces holds a single trace in every branch above, so this loop
    # writes one file; with multiple traces each iteration would overwrite it.
    for pkt_log in pkt_logs:
        with open(os.path.join(args.save_dir, "aurora_packet_log.csv"), 'w',
                  1) as f:
            pkt_logger = csv.writer(f, lineterminator='\n')
            pkt_logger.writerows(pkt_log)
Example #9
def main():
    args = parse_args()
    for trace_file in glob.glob(os.path.join(args.trace_dir, "*.json")):
        trace_name = os.path.splitext(os.path.basename(trace_file))[0]
        tr = Trace.load_from_file(trace_file)
        ms_series = tr.convert_to_mahimahi_format()
        with open(os.path.join(args.save_dir, trace_name), 'w', 1) as f:
            for ms in ms_series:
                f.write(str(ms) + '\n')

        # note: 'loss', 'queue', and 'delay' live directly in save_dir, so
        # each iteration overwrites them; only the last trace's values survive
        with open(os.path.join(args.save_dir, 'loss'), 'w', 1) as f:
            f.write(str(tr.loss_rate))
        with open(os.path.join(args.save_dir, 'queue'), 'w', 1) as f:
            f.write(str(int(tr.queue_size)))
        with open(os.path.join(args.save_dir, 'delay'), 'w', 1) as f:
            f.write(str(int(np.mean(np.array(tr.delays)))))
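
convert_to_mahimahi_format presumably emits mahimahi's trace format: one integer millisecond timestamp per line, each granting one MTU-sized delivery opportunity (1500 bytes by this repo's packet-size convention, an assumption), with the file replayed in a loop. A hedged sketch of the equivalent file for a constant 12 Mbps link (1000 packets per second, i.e. one opportunity per millisecond):

# 12 Mbps / (1500 bytes * 8 bits) = 1000 packet opportunities per second.
with open('const_12mbps_trace', 'w') as f:
    for ms in range(1, 1001):  # one second of trace; mahimahi repeats it
        f.write(str(ms) + '\n')
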
Example #10
def load_cc_rewards_across_traces(traces, log_files):
    rewards = []
    for trace, log_file in zip(traces, log_files):
        if not os.path.exists(log_file):
            continue
        pkt_log = PacketLog.from_log_file(log_file)
        rewards.append(pkt_log.get_reward("", trace))
    return rewards

traces = []
save_dirs = []
genet_save_dirs = []
for cc in TARGET_CCS:
    print("Loading real traces collected by {}...".format(cc))
    for trace_file in tqdm(sorted(glob.glob(os.path.join(
            TRACE_ROOT, "{}_datalink_run[1,3].log".format(cc))))):
        traces.append(Trace.load_from_pantheon_file(trace_file, 0.0, 50))
        save_dirs.append(os.path.join(
            RESULT_ROOT, EXP_NAME, os.path.basename(TRACE_ROOT),
            os.path.splitext(os.path.basename(trace_file))[0]))
        genet_save_dirs.append(os.path.join(
            RESULT_ROOT, EXP_NAME1, os.path.basename(TRACE_ROOT),
            os.path.splitext(os.path.basename(trace_file))[0]))
bbr_rewards = load_cc_rewards_across_traces(traces, [os.path.join(save_dir, "bbr", "bbr_packet_log.csv") for save_dir in save_dirs])

cubic_rewards = load_cc_rewards_across_traces(traces, [os.path.join(save_dir, "cubic", "cubic_packet_log.csv") for save_dir in save_dirs])

genet_steps = []
genet_avg_rewards = []
for bo in range(6):
Example #11
    def test(self,
             trace: Trace,
             save_dir: str,
             plot_flag: bool = False) -> Tuple[float, float]:
        """Test a network trace and return rewards.

        The 1st return value is the reward in Monitor Interval(MI) level and
        the length of MI is 1 srtt. The 2nd return value is the reward in
        packet level. It is computed by using throughput, average rtt, and
        loss rate in each 500ms bin of the packet log. The 2nd value will be 0
        if record_pkt_log flag is False.

        Args:
            trace: network trace.
            save_dir: where a MI level log will be saved if save_dir is a
                valid path. A packet level log will be saved if record_pkt_log
                flag is True and save_dir is a valid path.
        """

        links = [Link(trace), Link(trace)]
        senders = [BBRSender(0, 0, self.seed)]
        net = Network(senders, links, self.record_pkt_log)

        rewards = []
        start_rtt = trace.get_delay(0) * 2 / 1000
        run_dur = start_rtt
        if save_dir:
            os.makedirs(save_dir, exist_ok=True)
            f_sim_log = open(
                os.path.join(save_dir,
                             '{}_simulation_log.csv'.format(self.cc_name)),
                'w', 1)
            writer = csv.writer(f_sim_log, lineterminator='\n')
            writer.writerow([
                'timestamp', "send_rate", 'recv_rate', 'latency', 'loss',
                'reward', "action", "bytes_sent", "bytes_acked", "bytes_lost",
                "send_start_time", "send_end_time", 'recv_start_time',
                'recv_end_time', 'latency_increase', "packet_size",
                'bandwidth', "queue_delay", 'packet_in_queue', 'queue_size',
                'cwnd', 'ssthresh', "rto", "packets_in_flight"
            ])
        else:
            f_sim_log = None
            writer = None

        while True:
            net.run(run_dur)
            mi = senders[0].get_run_data()

            throughput = mi.get("recv rate")  # bits/sec
            send_rate = mi.get("send rate")  # bits/sec
            latency = mi.get("avg latency")
            avg_queue_delay = mi.get("avg queue delay")
            loss = mi.get("loss ratio")

            reward = pcc_aurora_reward(
                throughput / BITS_PER_BYTE / BYTES_PER_PACKET, latency, loss,
                trace.avg_bw * 1e6 / BITS_PER_BYTE / BYTES_PER_PACKET)
            rewards.append(reward)
            try:
                ssthresh = senders[0].ssthresh
            except AttributeError:
                # the sender may not expose ssthresh (e.g. BBR)
                ssthresh = 0
            action = 0

            if save_dir and writer:
                writer.writerow([
                    net.get_cur_time(), send_rate, throughput, latency, loss,
                    reward, action, mi.bytes_sent, mi.bytes_acked,
                    mi.bytes_lost, mi.send_start, mi.send_end, mi.recv_start,
                    mi.recv_end,
                    mi.get('latency increase'), mi.packet_size,
                    links[0].get_bandwidth(net.get_cur_time()) *
                    BYTES_PER_PACKET * BITS_PER_BYTE, avg_queue_delay,
                    links[0].pkt_in_queue, links[0].queue_size,
                    senders[0].cwnd, ssthresh, senders[0].rto,
                    senders[0].bytes_in_flight / BYTES_PER_PACKET
                ])
            if senders[0].srtt:
                run_dur = senders[0].srtt
            should_stop = trace.is_finished(net.get_cur_time())
            if should_stop:
                break
        if f_sim_log:
            f_sim_log.close()
        avg_sending_rate = senders[0].avg_sending_rate
        tput = senders[0].avg_throughput
        avg_lat = senders[0].avg_latency
        loss = senders[0].pkt_loss_rate
        pkt_level_reward = pcc_aurora_reward(tput,
                                             avg_lat,
                                             loss,
                                             avg_bw=trace.avg_bw * 1e6 /
                                             BITS_PER_BYTE / BYTES_PER_PACKET)
        pkt_level_original_reward = pcc_aurora_reward(tput, avg_lat, loss)
        if save_dir:
            with open(
                    os.path.join(save_dir,
                                 "{}_summary.csv".format(self.cc_name)),
                    'w') as f:
                summary_writer = csv.writer(f, lineterminator='\n')
                summary_writer.writerow([
                    'trace_average_bandwidth', 'trace_average_latency',
                    'average_sending_rate', 'average_throughput',
                    'average_latency', 'loss_rate', 'mi_level_reward',
                    'pkt_level_reward'
                ])
                summary_writer.writerow([
                    trace.avg_bw, trace.avg_delay,
                    avg_sending_rate * BYTES_PER_PACKET * BITS_PER_BYTE / 1e6,
                    tput * BYTES_PER_PACKET * BITS_PER_BYTE / 1e6, avg_lat,
                    loss,
                    np.mean(rewards), pkt_level_reward
                ])

        if self.record_pkt_log and save_dir:
            with open(
                    os.path.join(save_dir,
                                 "{}_packet_log.csv".format(self.cc_name)),
                    'w', 1) as f:
                pkt_logger = csv.writer(f, lineterminator='\n')
                pkt_logger.writerow([
                    'timestamp', 'packet_event_id', 'event_type', 'bytes',
                    'cur_latency', 'queue_delay', 'packet_in_queue',
                    'sending_rate', 'bandwidth'
                ])
                pkt_logger.writerows(net.pkt_log)
        # with open(os.path.join(save_dir, "{}_log.csv".format(self.cc_name)), 'w', 1) as f:
        #     writer = csv.writer(f, lineterminator='\n')
        #     writer.writerow(
        #         ['timestamp', 'pacing_gain', "pacing_rate", 'cwnd_gain',
        #          'cwnd', 'target_cwnd', 'prior_cwnd', "btlbw", "rtprop",
        #          "full_bw", 'state', "packets_in_flight",
        #          "in_fast_recovery_mode", 'rs_delivery_rate', 'round_start',
        #          'round_count', 'rto', 'exit_fast_recovery_ts',
        #          'pkt_in_queue'])
        #     writer.writerows(senders[0].bbr_log)
        if plot_flag and save_dir:
            plot_mi_level_time_series(
                trace,
                os.path.join(save_dir,
                             '{}_simulation_log.csv'.format(self.cc_name)),
                save_dir, self.cc_name)
            plot(trace, *senders[0].bin_tput, *senders[0].bin_sending_rate,
                 tput * BYTES_PER_PACKET * BITS_PER_BYTE / 1e6,
                 avg_sending_rate * BYTES_PER_PACKET * BITS_PER_BYTE / 1e6,
                 *senders[0].latencies, avg_lat * 1000, loss,
                 pkt_level_original_reward, pkt_level_reward, save_dir,
                 self.cc_name)
        return np.mean(rewards), pkt_level_reward
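
A usage sketch for test(), assuming the constructor takes the record_pkt_log flag as in Example #5 (BBR(False)) and that the trace path below exists (hypothetical):

trace = Trace.load_from_file('validation_traces/trace_0.json')  # hypothetical path
bbr = BBR(True)  # also record a packet-level log
mi_reward, pkt_reward = bbr.test(trace, 'results/bbr_trace_0', plot_flag=True)
print(mi_reward, pkt_reward)
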
Example #12
def main():
    args = parse_args()
    assert (not args.pretrained_model_path
            or args.pretrained_model_path.endswith(".ckpt"))
    os.makedirs(args.save_dir, exist_ok=True)
    save_args(args, args.save_dir)
    set_seed(args.seed + COMM_WORLD.Get_rank() * 100)
    nprocs = COMM_WORLD.Get_size()

    # Initialize model and agent policy
    aurora = Aurora(
        args.seed + COMM_WORLD.Get_rank() * 100,
        args.save_dir,
        int(args.val_freq / nprocs),
        args.pretrained_model_path,
        tensorboard_log=args.tensorboard_log,
    )
    # training_traces, validation_traces,
    training_traces = []
    val_traces = []
    if args.curriculum == "udr":
        config_file = args.config_file
        if args.train_trace_file:
            with open(args.train_trace_file, "r") as f:
                for line in f:
                    line = line.strip()
                    training_traces.append(Trace.load_from_file(line))

        if args.validation and args.val_trace_file:
            with open(args.val_trace_file, "r") as f:
                for line in f:
                    line = line.strip()
                    if args.dataset == "pantheon":
                        queue = 100  # dummy value
                        val_traces.append(
                            Trace.load_from_pantheon_file(line,
                                                          queue=queue,
                                                          loss=0))
                    elif args.dataset == "synthetic":
                        val_traces.append(Trace.load_from_file(line))
                    else:
                        raise ValueError
        train_scheduler = UDRTrainScheduler(
            config_file,
            training_traces,
            percent=args.real_trace_prob,
        )
    elif args.curriculum == "cl1":
        config_file = args.config_files[0]
        train_scheduler = CL1TrainScheduler(args.config_files, aurora)
    elif args.curriculum == "cl2":
        config_file = args.config_file
        train_scheduler = CL2TrainScheduler(config_file, aurora, args.baseline)
    else:
        raise NotImplementedError

    aurora.train(
        config_file,
        args.total_timesteps,
        train_scheduler,
        tb_log_name=args.exp_name,
        validation_traces=val_traces,
    )
Example #13
    "bandwidth": [0, 1, 2, 3, 4, 5, 6],
    "delay": [5, 50, 100, 150, 200],
    "loss": [0, 0.01, 0.02, 0.03, 0.04, 0.05],
    "queue": [2, 10, 50, 100, 150, 200],
    "T_s": [0, 1, 2, 3, 4, 5, 6],
    "delay_noise": [0, 20, 40, 60, 80, 100],
}

real_traces = []
for trace_file in glob.glob(os.path.join(REAL_TRACE_DIR, "*datalink_run*.log")):
    # keep only traces collected by the CCs of interest
    if all(cc not in trace_file
           for cc in ('bbr', 'cubic', 'vegas', 'pcc', 'copa')):
        continue
    if 'experimental' in trace_file:
        continue
    # np.random.uniform(10, 10, 1) always yields 10, so pass the constant directly
    tr = Trace.load_from_pantheon_file(trace_file, 50, 0, 10)
    print(tr.delays)
    print(min(tr.bandwidths), max(tr.bandwidths))
    real_traces.append(tr)


syn_traces = [generate_trace(duration_range=(30, 30),
                             bandwidth_range=(1, 3),
                             delay_range=(30, 50),
                             # delay_range=(100, 200),
                             loss_rate_range=(0, 0),
                             queue_size_range=(10, 60),
                             T_s_range=(1, 3),
                             delay_noise_range=(0, 0),
                             constant_bw=False) for _ in range(15)]
Example #14
import csv
from simulator.trace import Trace
from common.utils import write_json_file

for i in range(5):
    timestamps = []
    bandwidths = []
    delays = []
    queue = 2
    loss = 0
    delay_noise = 0
    with open('test_aws_new/run{}/delay_time_series.csv'.format(i), 'r') as f:
        reader = csv.reader(f)
        for cols in reader:
            timestamps.append(float(cols[0]))
            delays.append(float(cols[1]))
            bandwidths.append(0.6)
    tr = Trace(timestamps, bandwidths, delays, loss, queue, delay_noise)
    tr.dump('test_aws_new/run{}/trace.json'.format(i))
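
Assuming Trace.dump and Trace.load_from_file are symmetric (both appear throughout these examples), a dumped trace can be read back and sanity-checked:

tr2 = Trace.load_from_file('test_aws_new/run0/trace.json')
print(min(tr2.bandwidths), max(tr2.bandwidths))  # 0.6 0.6 for this constant-rate trace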