Example #1
def get_latency_stats(net):
    print "Capturing latency..."
    server = net.getNodeByName("h1")
    client = net.getNodeByName("h2")
    times = []
    start_time = time()
    cmd = "curl -o index.html -s -w %%{time_total} %s/http/index.html" % (server.IP())
    print(cmd)
    while True:
        # Measure the time taken to transfer the webpage.
        p = client.popen(cmd, shell=True, stdout=PIPE)
        time_total = float(p.stdout.read())
        times.append(time_total)

        # Break out of loop after enough time has elapsed.
        sleep(5)
        now = time()
        delta = now - start_time
        if delta > args.time:
            break

    # Calculate mean and standard deviation of latency.
    mean = helper.avg(times)
    stdev = helper.stdev(times)
    return [mean, stdev]
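
The helper.avg and helper.stdev calls above come from a helper module that is not shown. A minimal sketch of what such helpers might look like (the use of the sample standard deviation is an assumption):

from math import sqrt

def avg(values):
    # Arithmetic mean of a non-empty list.
    return sum(values) / len(values)

def stdev(values):
    # Sample standard deviation; 0.0 for fewer than two samples.
    if len(values) < 2:
        return 0.0
    m = avg(values)
    return sqrt(sum((v - m) ** 2 for v in values) / (len(values) - 1))
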
Example #2
    def update_physical_network_utilization(self):
        self.phy_link_utilizations = self.controller.network_model.get_physical_link_utilizations()
        # Track the peak utilization observed on each physical link.
        for link, utilization in self.phy_link_utilizations.items():
            if link not in self.max_phy_link_utilizations:
                self.max_phy_link_utilizations[link] = utilization
            elif utilization > self.max_phy_link_utilizations[link]:
                self.max_phy_link_utilizations[link] = utilization
        # Keep a sliding window of the 20 most recent samples per link and
        # report the windowed average as the link's current utilization.
        for link, utilization in self.phy_link_utilizations.items():
            if link not in self.avg_link_utilizations:
                self.avg_link_utilizations[link] = [utilization]
            else:
                if len(self.avg_link_utilizations[link]) > 20:
                    del self.avg_link_utilizations[link][0]
                self.avg_link_utilizations[link].append(utilization)
            self.link_utilizations[link] = helper.avg(self.avg_link_utilizations[link])
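
The hand-rolled 20-sample window above can also be expressed with collections.deque, which evicts the oldest sample automatically once maxlen is reached. A minimal sketch of the same idiom in isolation:

from collections import deque

window = deque(maxlen=20)  # the oldest sample is dropped automatically
for sample in (0.1, 0.5, 0.3):
    window.append(sample)
    current_avg = sum(window) / len(window)  # windowed average, as above
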
Example #3
def bufferbloat():
    if not os.path.exists(args.dir):
        os.makedirs(args.dir)
    os.system("sysctl -w net.ipv4.tcp_congestion_control=%s" % args.cong)

    # Cleanup any leftovers from previous mininet runs
    cleanup()

    topo = BBTopo()
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink)
    net.start()
    # This dumps the topology and how nodes are interconnected through
    # links.
    dumpNodeConnections(net.hosts)
    # This performs a basic all pairs ping test.
    net.pingAll()
    switch = net.get('s0')
    #switch.cmd('tc qdisc add dev s0-eth2 root pie limit 1000 target 20ms')
    #print "AQM pie Algorithm added"
    #switch.cmd('tc -s qdisc show')
    
    # Start all the monitoring processes
    start_tcpprobe("cwnd.txt")
    start_ping(net)

    # TODO: Start monitoring the queue sizes.  Since the switch I
    # created is "s0", I monitor one of the interfaces.  Which
    # interface?  The interface numbering starts with 1 and increases.
    # Depending on the order you add links to your network, this
    # number may be 1 or 2.  Ensure you use the correct number.
    #
    qmon = start_qmon(iface='s0-eth2',
                      outfile='%s/q.txt' % args.dir)
    #s0.cmd('tc qdisc add dev s0-eth2 root pie limit 1000 target 20ms')
    # TODO: Start iperf, webservers, etc.
    start_iperf(net)
    start_webserver(net)

    # Hint: The command below invokes a CLI which you can use to
    # debug.  It allows you to run arbitrary commands inside your
    # emulated hosts h1 and h2.
    #
    # CLI(net)

    # TODO: measure the time it takes to complete webpage transfer
    # from h1 to h2 (say) 3 times.  Hint: check what the following
    # command does: curl -o /dev/null -s -w %{time_total} google.com
    # Now use the curl command to fetch webpage from the webserver you
    # spawned on host h1 (not from google!)
    # Hint: have a separate function to do this and you may find the
    # loop below useful.
    start_time = time()
    time_measures = []
    while True:
        # do the measurement (say) 3 times.
        now = time()
        delta = now - start_time
        if delta > args.time:
            break
        print "%.1fs left..." % (args.time - delta)

        h1 = net.get('h1')
        h2 = net.get('h2')
        for i in range(3):
            webpage_time = h2.popen('curl -o /dev/null -s -w %%{time_total} %s/http/index.html' %
                    h1.IP()).communicate()[0]
            time_measures.append(float(webpage_time))
        sleep(5)

    # TODO: compute average (and standard deviation) of the fetch
    # times.  You don't need to plot them.  Just note it in your
    # README and explain.
    with open('%s/avgsd.txt' % args.dir, 'w') as f:
        f.write("Average: %f\nStandard Deviation: %f\n" % (avg(time_measures), stdev(time_measures)))

    print("Average is %f" % avg(time_measures))
    print("Standard deviation is %f" % stdev(time_measures))
    stop_tcpprobe()
    if qmon is not None:
        qmon.terminate()
    net.stop()
    # Ensure that all processes you create within Mininet are killed.
    # Sometimes they require manual killing.
    Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait()
Example #4
def bufferbloat():
    if args.http3:
        print("http3")
    else:
        print("tcp")
    if not os.path.exists(args.dir):
        os.makedirs(args.dir)
    os.system("sysctl -w net.ipv4.tcp_congestion_control=%s" % args.cong)
    topo = BBTopo()
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink)
    net.start()
    # This dumps the topology and how nodes are interconnected through
    # links.
    dumpNodeConnections(net.hosts)
    # This performs a basic all pairs ping test.
    net.pingAll()

    # TODO: Start monitoring the queue sizes.  Since the switch I
    # created is "s0", I monitor one of the interfaces.  Which
    # interface?  The interface numbering starts with 1 and increases.
    # Depending on the order you add links to your network, this
    # number may be 1 or 2.  Ensure you use the correct number.
    qmon = start_qmon(iface='s0-eth2', outfile='%s/q.txt' % (args.dir))

    # TODO: Start iperf, webservers, etc.
    start_iperf(net)
    start_ping(net)
    start_webserver(net)

    # TODO: measure the time it takes to complete webpage transfer
    # from h1 to h2 (say) 3 times.  Hint: check what the following
    # command does: curl -o /dev/null -s -w %{time_total} google.com
    # Now use the curl command to fetch webpage from the webserver you
    # spawned on host h1 (not from google!)

    # As a sanity check, before the time measurement, check whether the
    # webpage is transferred successfully by checking the response from curl

    # Hint: have a separate function to do this and you may find the
    # loop below useful.
    client = net.get('h2')
    server = net.get('h1')
    start_time = time()
    measurement = []
    while True:
        # do the measurement (say) 3 times.
        for _ in range(3):
            # Sanity check: fetch only the headers and make sure the server
            # responds before timing the full transfer.
            valid = client.popen("curl -ILs %s/http/index.html | grep '^HTTP'" %
                                 (server.IP()),
                                 shell=True).communicate()[0]
            # A "200" in the status line means the request succeeded.
            if b"200" in valid:
                print("pass sanity")
                response_t = client.popen(
                    "curl -o /dev/null -s -w %%{time_total} %s/http/index.html"
                    % server.IP(),
                    shell=True).communicate()[0]
                print("response_time: " + response_t.decode("utf-8"))
                measurement.append(float(response_t))
            else:
                print("fail sanity")
        sleep(5)
        now = time()
        delta = now - start_time
        if delta > args.time:
            break
        print("%.1fs left..." % (args.time - delta))

    # TODO: compute average (and standard deviation) of the fetch
    # times.  You don't need to plot them.  Just note it in your
    # README and explain.

    sd = stdev(measurement)
    avg_res = avg(measurement)
    print("Mean: {}, Stddev: {}".format(avg_res, sd))

    # Hint: The command below invokes a CLI which you can use to
    # debug.  It allows you to run arbitrary commands inside your
    # emulated hosts h1 and h2.
    # CLI(net)

    qmon.terminate()
    net.stop()
    # Ensure that all processes you create within Mininet are killed.
    # Sometimes they require manual killing.
    Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait()
Example #5
File: bufferbloat.py  Project: zkjfry/pa2
def bufferbloat():
    if not os.path.exists(args.dir):
        os.makedirs(args.dir)
    os.system("sysctl -w net.ipv4.tcp_congestion_control=%s" % args.cong)

    # Cleanup any leftovers from previous mininet runs
    cleanup()

    topo = BBTopo()
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink)
    net.start()
    # This dumps the topology and how nodes are interconnected through
    # links.
    dumpNodeConnections(net.hosts)
    # This performs a basic all pairs ping test.
    net.pingAll()

    # Start all the monitoring processes
    start_tcpprobe("cwnd.txt")
    start_ping(net)

    # TODO: Start monitoring the queue sizes.  Since the switch I
    # created is "s0", I monitor one of the interfaces.  Which
    # interface?  The interface numbering starts with 1 and increases.
    # Depending on the order you add links to your network, this
    # number may be 1 or 2.  Ensure you use the correct number.
    #
    qmon = start_qmon(iface='s0-eth2', outfile='%s/q.txt' % (args.dir))
    # qmon = None

    # TODO: Start iperf, webservers, etc.
    iperf = Process(target=start_iperf, args=(net, ))
    ping = Process(target=start_ping, args=(net, ))
    web = Process(target=start_webserver, args=(net, ))
    iperf.start()
    ping.start()
    web.start()
    # Hint: The command below invokes a CLI which you can use to
    # debug.  It allows you to run arbitrary commands inside your
    # emulated hosts h1 and h2.
    #
    # CLI(net)

    # TODO: measure the time it takes to complete webpage transfer
    # from h1 to h2 (say) 3 times.  Hint: check what the following
    # command does: curl -o /dev/null -s -w %{time_total} google.com
    # Now use the curl command to fetch webpage from the webserver you
    # spawned on host h1 (not from google!)
    # Hint: have a separate function to do this and you may find the
    # loop below useful.
    start_time = time()
    h1 = net.get('h1')
    h2 = net.get('h2')
    downloads = []
    curls = []
    while True:
        # Download every 2 seconds, so sleep for 2 seconds between rounds.
        sleep(2)
        now = time()
        delta = now - start_time
        # Stop once args.time seconds have elapsed.
        if delta > args.time:
            break
        print("%.1fs left..." % (args.time - delta))

        # Fetch on h2; keep each curl process so its timing output can be parsed later.
        curls.append(
            h2.popen(
                'curl -o /dev/null -s -w %%{time_total} %s/http/index.html' %
                h1.IP()))

    # Parse the raw download times from each curl process.
    for curl in curls:
        download = curl.communicate()[0]
        downloads.append(float(download))

    # Write the download times to a text file as floats for plotting.
    with open("%s/download.txt" % args.dir, "w") as f:
        f.writelines("%f\n" % download for download in downloads)

    # TODO: compute average (and standard deviation) of the fetch
    # times.  You don't need to plot them.  Just note it in your
    # README and explain.
    with open(os.path.join(args.dir, 'measurements.txt'), 'w') as f:
        f.write(
            "Average download time is %lf, standard deviation for download time is %lf\n"
            % (helper.avg(downloads), helper.stdev(downloads)))

    stop_tcpprobe()
    if qmon is not None:
        qmon.terminate()
    net.stop()
    # Ensure that all processes you create within Mininet are killed.
    # Sometimes they require manual killing.
    Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait()
Example #6
def bufferbloat():
    if not os.path.exists(args.dir):
        os.makedirs(args.dir)
    os.system("sysctl -w net.ipv4.tcp_congestion_control=%s" % args.cong)

    # Cleanup any leftovers from previous mininet runs
    cleanup()

    topo = BBTopo()
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink)
    net.start()
    # This dumps the topology and how nodes are interconnected through
    # links.
    dumpNodeConnections(net.hosts)
    # This performs a basic all pairs ping test.
    net.pingAll()

    # Start all the monitoring processes
    start_tcpprobe("cwnd.txt")
    start_ping(net)

    # Start monitoring the queue sizes.  Since the switch I
    # created is "s0", I monitor one of the interfaces.  Which
    # interface?  The interface numbering starts with 1 and increases.
    # Depending on the order you add links to your network, this
    # number may be 1 or 2.  Ensure you use the correct number.
    #
    qmon = start_qmon(iface='s0-eth2', outfile='%s/q.txt' % (args.dir))

    #Start iperf, webservers, etc.
    start_iperf(net)
    start_webserver(net)

    # Hint: The command below invokes a CLI which you can use to
    # debug.  It allows you to run arbitrary commands inside your
    # emulated hosts h1 and h2.
    #
    # CLI(net)

    # Measure the time it takes to complete webpage transfer
    # from h1 to h2 (say) 3 times.  Hint: check what the following
    # command does: curl -o /dev/null -s -w %{time_total} google.com
    # Now use the curl command to fetch webpage from the webserver you
    # spawned on host h1 (not from google!)
    # Hint: have a separate function to do this and you may find the
    # loop below useful.
    h1 = net.get('h1')
    h2 = net.get('h2')
    # The long-lived flow runs from h2 to h1, so the fetches below use the same direction.
    start_time = time()
    flow_clock = []
    while True:
        # do the measurement (say) 3 times.
        # When fetching with curl, request webserver_ip_address/http/index.html.
        flow_clock = fetch_webpage(h1, h2, flow_clock)
        sleep(5)
        now = time()
        delta = now - start_time
        if delta > args.time:
            break
        print "%.1fs left..." % (args.time - delta)

    # Compute average (and standard deviation) of the fetch
    # times.  You don't need to plot them.  Just note it in your
    # README and explain.

    #Printing average with helper.py
    flow_clock = list(map(float, flow_clock))  # ensure floats before averaging
    average_fetch = helper.avg(flow_clock)
    std_dev_fetch = helper.stdev(flow_clock)
    print("Average fetch time is: %f\n" % average_fetch)
    print("Standard deviation of the fetch is: %f\n" % std_dev_fetch)

    stop_tcpprobe()
    if qmon is not None:
        qmon.terminate()
    net.stop()
    # Ensure that all processes you create within Mininet are killed.
    # Sometimes they require manual killing.
    Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait()
Example #7
def bufferbloat():
    if not os.path.exists(args.dir):
        os.makedirs(args.dir)
    os.system("sysctl -w net.ipv4.tcp_congestion_control=%s" % args.cong)

    # Cleanup any leftovers from previous mininet runs
    cleanup()

    topo = BBTopo()
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink)
    net.start()
    # This dumps the topology and how nodes are interconnected through
    # links.
    dumpNodeConnections(net.hosts)
    # This performs a basic all pairs ping test.
    net.pingAll()

    # Start all the monitoring processes
    start_tcpprobe("cwnd.txt")
    start_ping(net)

    # TODO: Start monitoring the queue sizes.  Since the switch I
    # created is "s0", I monitor one of the interfaces.  Which
    # interface?  The interface numbering starts with 1 and increases.
    # Depending on the order you add links to your network, this
    # number may be 1 or 2.  Ensure you use the correct number.
    #
    qmon = start_qmon(iface='s0-eth2', outfile='%s/q.txt' % (args.dir))
    # qmon = None

    # TODO: Start iperf, webservers, etc.
    start_iperf(net)
    start_webserver(net)

    # Hint: The command below invokes a CLI which you can use to
    # debug.  It allows you to run arbitrary commands inside your
    # emulated hosts h1 and h2.
    #
    # CLI(net)

    # TODO: measure the time it takes to complete webpage transfer
    # from h1 to h2 (say) 3 times.  Hint: check what the following
    # command does: curl -o /dev/null -s -w %{time_total} google.com
    # Now use the curl command to fetch webpage from the webserver you
    # spawned on host h1 (not from google!)
    # Hint: have a separate function to do this and you may find the
    # loop below useful.
    h1 = net.get('h1')
    h2 = net.get('h2')
    # Record the start time so we know when to stop the experiment.
    start_time = time()

    # Collects the download times measured over the run.
    fetch_times = []

    # Collects the (fetch timestamp, download time) pairs, i.e. the [fetch_clock, float(fetch_time)] lists returned by measure_fetch_time.
    measure_data = []
    while True:
        # do the measurement (say) 3 times.
        fetch_info = measure_fetch_time(h1, h2)

        #get the download time
        fetch_time = fetch_info[1]
        print "fetch time %s" % fetch_time

        #add every download time to the list
        fetch_times.append(float(fetch_time))

        #add every tuple (fetch time point, download time) to the list
        measure_data.append(fetch_info)

        # Pause one second between measurement rounds.
        sleep(1)
        now = time()
        delta = now - start_time
        if delta > args.time:
            break
        print "%.1fs left..." % (args.time - delta)

    #save the time points and download times to the file
    save_download_time_to_file(measure_data)

    # TODO: compute average (and standard deviation) of the fetch
    # times.  You don't need to plot them.  Just note it in your
    # README and explain.
    ave = helper.avg(fetch_times)
    std = helper.stdev(fetch_times)
    print "Average: " + str(ave)
    print "Standard deviation: " + str(std)

    #save the average (and standard deviation) of the fetch to the file
    save_ave_std_to_file(str(ave), str(std))

    stop_tcpprobe()
    if qmon is not None:
        qmon.terminate()
    net.stop()
    # Ensure that all processes you create within Mininet are killed.
    # Sometimes they require manual killing.
    Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait()