Code example #1
    def read_and_start(self):
        files = glob("params/*.json")

        for file in files:
            print(file)
            with open(file) as f:
                param = json.loads(f.read())
            self.callback([param])
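
This snippet assumes `glob` (from the `glob` module) and `json` are imported at module level and that the enclosing class provides a `callback` that launches a run per parameter set. A minimal, self-contained stand-in for that context (every name besides `glob` and `json` is illustrative):

from glob import glob
import json

class Runner:
    """Hypothetical host class for read_and_start."""

    def callback(self, params):
        # placeholder: the real callback would start a measurement run
        for param in params:
            print("starting run with", param)

    def read_and_start(self):
        # scan the parameter directory and dispatch every JSON file
        for file in glob("params/*.json"):
            with open(file) as f:
                self.callback([json.loads(f.read())])

if __name__ == "__main__":
    Runner().read_and_start()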
Code example #2
def get_opts(configfilename):
    """
    read the config file and parse it (including our own json deserialization)
    :param configfilename: base name of the config file, without the ".json" suffix; str or bytes
    :return: the deserialized options
    """
    # results coming back from tensorflow under python3 are bytes and require decoding
    if isinstance(configfilename, bytes):
        configfilename = configfilename.decode()

    # read config file and deserialize
    with open("%s.json" % configfilename) as f:
        opts = JsonSerialization.loads(f.read())

    return opts
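
`JsonSerialization` is the project's own deserialization helper; its `loads(str)` call signature matches the standard `json` module, so `json` can stand in for a quick test. Hypothetical calls (the filename is illustrative):

opts = get_opts("experiment1")    # assuming experiment1.json exists next to the script
opts = get_opts(b"experiment1")   # bytes work too, e.g. a name coming out of a tf.data pipeline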
Code example #3
def get_filenames_and_config():
    """
    find all files, check consistency and read config
    :return: filenames and configs as list
    """

    # where to search for preprocessed data
    dir = "../data/preprocessed/"

    # find all files
    files = glob(dir + "*.pcap.json")
    files = [f[len(dir):] for f in files]

    # consistency check: do pcaps before and after bottleneck exist?
    names = {}
    for f in files:
        name = None
        add = 0
        if f.endswith("+before.pcap.json"):
            name = f[:-len("+before.pcap.json")]
            add = 1
        elif f.endswith("+after.pcap.json"):
            name = f[:-len("+after.pcap.json")]
            add = 2
        if name:
            if name not in names:
                names[name] = 0
            names[name] += add

    filenames = []
    for k, v in names.items():
        if v < 3:
            # at least one of the two pcaps is missing (1 = before only, 2 = after only)
            print("missing files for %s" % k)
        else:
            if os.path.exists("../data/jsons/%s.json" % k):
                # everything alright
                beforepcap = "%s%s+before.pcap.json" % (dir, k)
                afterpcap = "%s%s+after.pcap.json" % (dir, k)
                with open("../data/jsons/%s.json" % k) as f:
                    config = json.loads(f.read())
                # add filenames, config and general id
                filenames.append((config, beforepcap, afterpcap, k))
            else:
                # configfile missing
                print("missing files for %s" % k)

    return filenames
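
The consistency check is effectively a two-bit mask: a `+before` capture contributes 1, an `+after` capture contributes 2, so only names that sum to exactly 3 have both captures. For example (hypothetical filenames):

# run42+before.pcap.json -> names["run42"] = 1
# run42+after.pcap.json  -> names["run42"] = 1 + 2 = 3   -> complete pair
# run99+after.pcap.json  -> names["run99"] = 2           -> before capture missing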
Code example #4
def create_single_host_runs():
    """
    combine single host network params and topology to config file
    """
    params = get_single_host_params()
    for param in params:
        filename = "single," + dict_to_filename(param)
        run = {}
        run['pcapName'] = "single"
        run['time'] = 60
        run['topoid'] = 'singlehost'
        run['topo'] = get_single_host_topology(**param)
        run['config'] = param

        with open(dir + filename + ".json", "w") as f:
            f.write(json.dumps(run))
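
Examples #4-#6 follow the same pattern: enumerate a parameter grid, build a topology per parameter set, and serialize each run description to a JSON file whose name encodes the parameters. The output directory `dir` and the `dict_to_filename` helper are defined elsewhere in the project; a plausible, purely illustrative `dict_to_filename` matching the comma-separated naming would be:

def dict_to_filename(d):
    # illustrative sketch only, not the project's actual helper:
    # join sorted key=value pairs with commas,
    # e.g. {"rate": 10, "delay": 5} -> "delay=5,rate=10"
    return ",".join("%s=%s" % (k, d[k]) for k in sorted(d))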
Code example #5
def create_crosstraffic_runs():
    """
    combine crosstraffic network params and topology to config file
    """
    params = get_crosstraffic_params()
    for param in params:
        filename = "cross," + dict_to_filename(param)
        run = {}
        run['pcapName'] = "cross"
        run['time'] = 60
        run['topoid'] = 'crosstraffic'
        run['topo'] = get_crosstraffic_topology(**param)
        run['config'] = param

        with open(dir + filename + ".json", "w") as f:
            f.write(json.dumps(run))
Code example #6
def create_multi_host_runs():
    """
    combine multi host network params and topology to config file
    """
    params = get_multi_host_params()
    for param in params:
        cong1, cong2 = param['cong12']
        del param['cong12']
        param['cong1'] = cong1
        param['cong2'] = cong2
        filename = "multi," + dict_to_filename(param)
        run = {}
        run['pcapName'] = "multi"
        run['time'] = 60
        run['topoid'] = 'multihost'
        run['topo'] = get_multi_host_topology(**param)
        run['config'] = param

        with open(dir + filename + ".json", "w") as f:
            f.write(json.dumps(run))
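
The only twist in the multi-host variant is that each parameter set carries a `cong12` pair (the congestion-control algorithms of the two hosts), which is flattened into separate `cong1`/`cong2` keys before the filename is built. Illustratively (hypothetical values):

# before: {"cong12": ("cubic", "bbr"), "rate": 10}
# after:  {"cong1": "cubic", "cong2": "bbr", "rate": 10}
# file:   "multi," + dict_to_filename(param) + ".json"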
Code example #7
def mp_create_hist(outdir, filenames_and_config):
    """
    create histograms for files
    :param outdir: dir to output histograms to
    :param filenames_and_config: preparsed config and datafilename
    :return: csv entry line
    """
    (config, before, after, k) = filenames_and_config
    cong = config['config']['cong']

    # get variant and pacing label from name
    variant_idx = names_to_variant_idx[cong]
    pacing_idx = names_to_pacing_idx[cong]

    # check whether files already exist - if yes, skip histogram creation
    file_exists = os.path.isfile("%s/1ms/%s_before.csv" %
                                 (outdir, k)) and os.path.isfile(
                                     "%s/1ms/%s_after.csv" % (outdir, k))
    if file_exists:
        return [(variant_idx, pacing_idx, 0, "%s" % k, "%s_before.csv" % k),
                (variant_idx, pacing_idx, 1, "%s" % k, "%s_after.csv" % k)]

    # read packet arrival jsons (with-blocks so the handles are closed on error, too)
    try:
        with open(before) as before_file, open(after) as after_file:
            before_data = json.loads(before_file.read())
            after_data = json.loads(after_file.read())
    except Exception as e:
        print(e, "problem with", (before, after))
        return None

    # extract flows from packet arrival
    before_flow_data = get_right_flow(before_data, config)
    after_flow_data = get_right_flow(after_data, config)

    if before_flow_data is None or after_flow_data is None:
        print("problem parsing", (before, after))
        return None

    # extract flow directions
    before_flow, before_flow_back, _ = before_flow_data
    after_flow, after_flow_back, _ = after_flow_data

    # extract timestamp, length and sequence number
    packets_before = [(packet['t'], packet['l'], packet['seq'])
                      for packet in before_flow if packet['l'] > 0]
    packets_after = [(packet['t'], packet['l'], packet['seq'])
                     for packet in after_flow if packet['l'] > 0]

    # sort if wrong order
    packets_before = sort_if_necessary(packets_before)
    packets_after = sort_if_necessary(packets_after)

    # create histograms
    hist_before = get_histogram([packets_before], steps=0.001, end=60.0)
    hist_after = get_histogram([packets_after], steps=0.001, end=60.0)

    # write histograms to csv files
    _csv("%s/1ms/%s_before.csv" % (outdir, k), hist_before)
    _csv("%s/1ms/%s_after.csv" % (outdir, k), hist_after)

    # return two csv label file lines (before and after bottleneck)
    return [(variant_idx, pacing_idx, 0, "%s" % k, "%s_before.csv" % k),
            (variant_idx, pacing_idx, 1, "%s" % k, "%s_after.csv" % k)]
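
`get_right_flow`, `sort_if_necessary`, `get_histogram`, and `_csv` are project helpers. Judging only from the call sites above (packet tuples of `(timestamp, length, seq)`, 1 ms steps, a 60 s end), a minimal stand-in for `get_histogram` could sum the bytes arriving in each fixed-width time bin; this is an assumption about its semantics, not the project's actual implementation:

def get_histogram(flows, steps, end):
    # sketch under assumed semantics: bytes received per fixed-width time bin
    nbins = int(end / steps)
    hist = [0] * nbins
    for packets in flows:
        for t, length, _seq in packets:
            idx = int(t / steps)
            if 0 <= idx < nbins:
                hist[idx] += length
    return hist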
Code example #8
def do(datadir, jsondir, tmpdir, runs):
    """
    set up the topologies, run the traffic generation and record pcaps
    :param datadir: directory where to save pcaps
    :param jsondir: directory where to save jsons
    :param tmpdir:  directory where to save pcaps temporarily
    :param runs: list of topology information
    :return: True if the runs completed, False if all retries failed
    """
    runtime_params = []

    # it is possible to have multiple topologies in parallel (untested, we only used one)
    for run in runs:
        runtime_params.append({})
        runtime_params[-1]['time'] = run['time']
        runtime_params[-1]['opts'] = run

        while True:
            # find unused filenames
            uniqueid = uuid.uuid4().hex
            runtime_params[-1]['uniquename'] = run['pcapName'] + '+' + uniqueid

            logfilename = jsondir + runtime_params[-1]['uniquename'] + ".log"
            templogfilename = tmpdir + '/' + runtime_params[-1][
                'uniquename'] + ".log"
            runtime_params[-1]['templogfilename'] = templogfilename
            runtime_params[-1]['logfilename'] = logfilename

            runtime_params[-1]['jsonfilename'] = jsondir + runtime_params[-1][
                'uniquename'] + ".json"

            if not os.path.exists(logfilename):
                break

        # log into file
        runtime_params[-1]['filehandler'] = logging.FileHandler(
            templogfilename)

        runtime_params[-1]['filehandler'].setLevel(logging.INFO)
        lg.addHandler(runtime_params[-1]['filehandler'])

    ok = False
    retry = 0
    while True:
        retry += 1

        # setup topology from params
        topo = DynamicTopo(runs)
        net = MininetCong(
            topo=topo,
            link=TCLinkTBF,
            controller=None,  # OVSController,
            switch=OVSBridge)
        # setup and build network
        net.start()
        net.waitConnected()

        for runtime in runtime_params:
            lg.output("%s\n" % runtime['opts'])

        run_senders = topo.getSenders()
        run_recordsenders = topo.getRecordsenders()
        run_receivers = topo.getReceivers()
        run_recorddevcons = topo.getRecordDevCons()
        # print(run_recorddevcons)
        run_recordlinks = topo.getRecordingLinks()

        switch_by_name = {s.name: s for s in net.switches}
        hosts_by_name = {h.name: h for h in net.hosts}

        run_senders = [[hosts_by_name[sender] for sender in senders]
                       for senders in run_senders]
        run_receivers = [[hosts_by_name[receiver] for receiver in receivers]
                         for receivers in run_receivers]
        run_recordsenders = [[
            hosts_by_name[recordsender] for recordsender in recordsenders
        ] for recordsenders in run_recordsenders]
        run_recorddevcons = [[(hosts_by_name[a], hosts_by_name[b], c, p)
                              for a, b, c, p in devcons]
                             for devcons in run_recorddevcons]

        recs = []

        # set up recordings
        for senders, recordlinks, runtime, recordsenders in zip(
                run_senders, run_recordlinks, runtime_params,
                run_recordsenders):
            assert len(recordsenders) == 1
            last_sender = recordsenders[0]
            # switches = [(s0, "s0"), (s1, "s1"), (s4, "s4"), (s5, "s5")]
            # edgeswitches = [(switch_by_name[first_switch], "s0"), (switch_by_name[last_switch], "s5")]

            runtime['opts']['ip'] = last_sender.IP()

            for link, switchname, name in recordlinks:
                print(link())
                switch = switch_by_name[switchname]
                filename = runtime['uniquename'] + "+" + name + ".pcap"
                recs.append(
                    PcapRecorder(switch, tmpdir + '/' + filename,
                                 link().name, last_sender.IP(),
                                 datadir + filename))

        # try up to 40 times to ping between all connected hosts
        for i in range(40):
            loss = 0
            for senders, receivers, recordsenders in zip(
                    run_senders, run_receivers, run_recordsenders):
                all_hosts = senders + recordsenders + receivers
                loss += net.ping(all_hosts)

            if loss == 0:
                break

        sleep(2)

        # start trafficgen server
        for receiver, runtime in zip(run_receivers, runtime_params):
            for h2 in receiver:
                lg.output("start own server %s\n" % h2.name)
                # h1 is client sender, h2 is server receiver
                # data is sent from h1 to h2
                net.ownStartServer(h2, seconds=runtime['time'] + 10)
                # h2.cmd("timeout 20 nc -l -p 5001 > /dev/null &")

        sleep(2)

        # test whether the server is listening before starting
        con = True
        for recorddevcon, runtime in zip(run_recorddevcons, runtime_params):
            for (h1, h2, c, p) in recorddevcon:
                lg.output("test connection between server %s and client %s\n" %
                          (h2.name, h1.name))
                con = net.ownTestConnection(h2, h1)
                if not con:
                    break
        if not con:
            lg.output("connection failed\n")
            with open("errors.txt", "a+") as f:
                f.write("connection failed\n")
            for receiver in run_receivers:
                for h2 in receiver:
                    lg.output("stop own server %s\n" % h2.name)
                    # h1 is client, h2 is server
                    # data is sent from h1 to h2
                    net.ownStopServer(h2)
            try:
                net.stop()
            except Exception:
                pass
            cleanup()
            if retry <= 3:
                continue
            else:
                lg.output("3 retries failed\n")
                with open("errors.txt", "a+") as f:
                    f.write("3 retries failed\n")
                break

        lg.output("run generation\n")

        # start client (sender) of background connections
        i = 0
        for recordsenders, devcon, runtime in zip(run_recordsenders,
                                                  run_recorddevcons,
                                                  runtime_params):
            # for (h1, h2) in zip(sender, receiver):
            for (h1, h2, cong, pacing) in devcon:
                if h1 in recordsenders:
                    continue
                net.ownStartClient(h2,
                                   h1,
                                   seconds=runtime['time'] + 2 + 2,
                                   cong=cong,
                                   pacing=pacing)
                # h1.cmd("timeout 15 nc 10.0.0.1 5001 & ")
                i += 1

        # start tcpdump recording
        for rec in recs:
            rec.start()

        sleep(2)

        # start client (sender) which should be recorded
        for recordsenders, devcon, runtime in zip(run_recordsenders,
                                                  run_recorddevcons,
                                                  runtime_params):
            # for (h1, h2) in zip(sender, receiver):
            for (h1, h2, cong, pacing) in devcon:
                if h1 not in recordsenders:
                    continue
                net.ownStartClient(h2,
                                   h1,
                                   seconds=runtime['time'],
                                   cong=cong,
                                   pacing=pacing)
                # h1.cmd("timeout 10 dd if=/dev/zero | nc 10.0.0.2 5001")
                #        net.iperf((h1, h2), seconds=5, cong=cong[-1])

        sleep(max([runtime['time'] for runtime in runtime_params]) + 2)

        # stop recording
        for rec in recs:
            rec.stop()

        # stop client and server and check whether successful
        try:
            for sender, receiver, recordsender, runtime in zip(
                    run_senders, run_receivers, run_recordsenders,
                    runtime_params):
                for h1 in sender + recordsender:
                    # h1 is client, h2 is server
                    lg.output("stop %s\n" % h1.name)
                    net._parseOwn(net.ownStopClient(h1))
                for h2 in receiver:
                    lg.output("stop %s\n" % h2.name)
                    net._parseOwn(net.ownStopServer(h2))

        except Exception as e:
            lg.output("stopping hosts failed\n")
            with open("errors.txt", "a+") as f:
                f.write("stopping hosts failed " + str(e) + "\n")
            for sender, receiver, recordsender, runtime in zip(
                    run_senders, run_receivers, run_recordsenders,
                    runtime_params):
                for h1 in sender + recordsender:
                    # h1 is client, h2 is server
                    lg.output("stop %s\n" % h1.name)
                    net.ownStopClient(h1)
                for h2 in receiver:
                    lg.output("stop %s\n" % h2.name)
                    net.ownStopServer(h2)
            cleanup()
            if retry <= 3:
                continue
            else:
                lg.output("3 retries failed\n")
                with open("errors.txt", "a+") as f:
                    f.write("3 retries failed\n")
                break

        net.stop()
        ok = True
        break

    print("remove handler\n")
    for runtime in runtime_params:
        with open(runtime['jsonfilename'], 'w') as f:
            f.write(json.dumps(runtime['opts']))
            f.flush()
        lg.removeHandler(runtime['filehandler'])
        shutil.move(runtime['templogfilename'], runtime['logfilename'])
    cleanup()
    print("done\n")
    return ok
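
Tying the examples together, a hypothetical driver (directory names are illustrative) would read the run descriptions written by the `create_*_runs` helpers and pass them to `do` one at a time, since running several topologies in parallel is untested:

from glob import glob
import json

# hypothetical driver, assuming the directory layout used above
runs = []
for file in glob("params/*.json"):
    with open(file) as f:
        runs.append(json.loads(f.read()))

for run in runs:
    ok = do("../data/pcaps/", "../data/jsons/", "/tmp", [run])
    if not ok:
        print("run failed:", run.get("pcapName"))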