Example 1
    def __init__(self,
                 num_iterations,
                 network_configurations,
                 num_measurements,
                 measurement_rates,
                 number_of_test_cases,
                 number_of_RT_flow_list,
                 number_of_BE_flow_list,
                 base_delay_budget,
                 link_delay_upper_bound):

        super(QosDemo, self).__init__("number_of_hosts", num_iterations)
        self.network_configurations = network_configurations
        self.num_measurements = num_measurements

        self.measurement_rates = measurement_rates

        self.number_of_test_cases = number_of_test_cases
        self.number_of_RT_flow_list = number_of_RT_flow_list
        self.number_of_BE_flow_list = number_of_BE_flow_list

        self.base_delay_budget = base_delay_budget
        self.link_delay_upper_bound = link_delay_upper_bound

        self.cm = ControllerMan(controller="ryu")
        self.cm.stop_controller()
        time.sleep(5)
        self.controller_port = self.cm.start_controller()

        self.data = {
            "Throughput": defaultdict(defaultdict),
            "99th Percentile Latency": defaultdict(defaultdict),
            "Maximum Latency": defaultdict(defaultdict)
        }
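
A hypothetical construction sketch for the variant above; every concrete value is a placeholder, and the surrounding testbed (ControllerMan, Ryu, Mininet, the NetworkConfiguration objects of Example 4) is assumed to be available.

# Sketch only -- illustrative parameter values, not taken from the original experiments.
network_configurations = []   # would be populated with NetworkConfiguration objects (Example 4)

exp = QosDemo(num_iterations=1,
              network_configurations=network_configurations,
              num_measurements=1,
              measurement_rates=[50],          # rates handed to construct_netperf_cmd_str(); units assumed
              number_of_test_cases=1,
              number_of_RT_flow_list=[2],
              number_of_BE_flow_list=[0],
              base_delay_budget=0.001,         # assumed seconds (Example 7 multiplies by 1000*1000 for us)
              link_delay_upper_bound=100)      # per-link delay bound; units as in Example 7's comments
exp.trigger()                                  # trigger() for this signature is shown in Example 7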
Example 2
    def __init__(self, num_iterations, network_configurations,
                 num_measurements):

        super(QosDemo, self).__init__("number_of_hosts", num_iterations)
        self.network_configurations = network_configurations
        self.num_measurements = num_measurements

        self.cm = ControllerMan(controller="ryu")
        self.cm.stop_controller()
        time.sleep(5)
        self.controller_port = self.cm.start_controller()

        self.data = {
            "Throughput": defaultdict(defaultdict),
            "99th Percentile Latency": defaultdict(defaultdict),
            "Maximum Latency": defaultdict(defaultdict)
        }
    def setup_network_graph(self,
                            mininet_setup_gap=None,
                            synthesis_setup_gap=None):

        if not self.load_config and self.save_config:

            if self.controller == "ryu":

                self.cm = ControllerMan(controller=self.controller)
                self.cm.start_controller()

                #time.sleep(mininet_setup_gap)
                self.start_mininet()
                if mininet_setup_gap:
                    time.sleep(mininet_setup_gap)

            # These are needed by the network graph...
            self.get_switches()
            self.get_host_nodes()
            self.get_links()

            self.ng = NetworkGraph(network_configuration=self)
            self.ng.parse_network_graph()

            if self.synthesis_name:

                # Now the synthesis...
                self.trigger_synthesis(synthesis_setup_gap)

                # Refresh just the switches in the network graph, post synthesis
                self.get_switches()
                self.ng.parse_network_graph()
                #self.ng.parse_switches()

        else:
            self.ng = NetworkGraph(network_configuration=self)
            self.ng.parse_network_graph()

        print "total_flow_rules:", self.ng.total_flow_rules

        return self.ng
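
For reference, a minimal usage sketch of the setup_network_graph() flow above, assuming a fully constructed NetworkConfiguration instance nc (see Example 4) and a running testbed environment.

# Sketch only: with a fresh config directory this builds the controller/Mininet/synthesis
# state and returns the parsed network graph.
ng = nc.setup_network_graph(mininet_setup_gap=1, synthesis_setup_gap=1)
print "parsed network graph with total_flow_rules:", ng.total_flow_rules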
Example 4
class QosDemo(Experiment):

    def __init__(self,
                 num_iterations,
                 network_configurations,
                 num_measurements):

        super(QosDemo, self).__init__("number_of_hosts", num_iterations)
        self.network_configurations = network_configurations
        self.num_measurements = num_measurements

        self.cm = ControllerMan(controller="ryu")
        self.cm.stop_controller()
        time.sleep(5)
        self.controller_port = self.cm.start_controller()

        self.data = {
            "Throughput": defaultdict(defaultdict),
            "99th Percentile Latency": defaultdict(defaultdict),
            "Maximum Latency": defaultdict(defaultdict)
        }

    def trigger(self):

        for nc in self.network_configurations:
            print "network_configuration:", nc

            nc.setup_network_graph(mininet_setup_gap=1, synthesis_setup_gap=1)
            nc.init_flow_specs()

            # mhasan: MCP code will go there
            mcph.find_path_by_mcp(nc)  # updates the 'path' attribute of each FlowSpecification
            mcph.synthesize_flow_specifications(nc)
            # nc.mininet_obj.pingAll('1')
            self.measure_flow_rates(nc)

        print "here"


    def parse_iperf_output(self, iperf_output_string):
        data_lines = iperf_output_string.split('\r\n')
        interesting_line_index = None
        for i in xrange(len(data_lines)):
            if data_lines[i].endswith('Server Report:'):
                interesting_line_index = i + 1
        data_tokens = data_lines[interesting_line_index].split()
        print "Transferred Rate:", data_tokens[7]
        print "Jitter:", data_tokens[9]

    def parse_ping_output(self, ping_output_string):

        data_lines = ping_output_string.split('\r\n')
        interesting_line_index = None
        for i in xrange(len(data_lines)):
            if data_lines[i].startswith('5 packets transmitted'):
                interesting_line_index = i + 1
        data_tokens = data_lines[interesting_line_index].split()
        data_tokens = data_tokens[3].split('/')
        print 'Min Delay:', data_tokens[0]
        print 'Avg Delay:', data_tokens[1]
        print 'Max Delay:', data_tokens[2]

    def measure_flow_rates(self, nc):

        for i in range(self.num_iterations):
            print "iteration:", i + 1

            for j in range(self.num_measurements):

                max_fs_duration = 0

                for fs in nc.flow_specs:

                    if not fs.measurement_rates:
                        continue

                    os.system('killall netserver')  # kill any previously started netserver instances

                    server_command = "/usr/local/bin/netserver"
                    server_output = fs.mn_dst_host.cmd(server_command)
                    client_output = fs.mn_src_host.cmd(fs.construct_netperf_cmd_str(fs.measurement_rates[j]))

                    if fs.tests_duration > max_fs_duration:
                        max_fs_duration = fs.tests_duration

                # Sleep for 10 seconds more than flow duration to make sure netperf has finished.
                time.sleep(max_fs_duration + 10)

                for fs in nc.flow_specs:

                    if not fs.measurement_rates:
                        continue

                    print "=== netperf output ==="
                    netperf_output_string = fs.mn_src_host.read()
                    print netperf_output_string
                    if netperf_output_string.find("no response received") >= 0:
                        raise ValueError("No response received from netperf...")

                    fs.measurements[fs.measurement_rates[j]].append(fs.parse_measurements(netperf_output_string))
class NetworkConfiguration(object):
    def __init__(self,
                 controller,
                 controller_ip,
                 controller_port,
                 controller_api_base_url,
                 controller_api_user_name,
                 controller_api_password,
                 topo_name,
                 topo_params,
                 conf_root,
                 synthesis_name,
                 synthesis_params,
                 roles,
                 project_name="test",
                 power_simulator_ip="127.0.0.1",
                 link_latency=""):

        self.controller = controller
        self.topo_name = topo_name
        self.topo_params = topo_params
        self.conf_root = conf_root
        self.synthesis_name = synthesis_name
        self.synthesis_params = synthesis_params
        self.roles = roles
        self.project_name = project_name
        self.power_simulator_ip = power_simulator_ip
        self.link_latency = link_latency

        self.controller_ip = controller_ip
        self.controller_port = controller_port
        self.topo = None
        self.nc_topo_str = None
        self.init_topo()
        self.init_synthesis()

        self.mininet_obj = None
        self.cm = None
        self.ng = None

        # Set up the directory for saving configs. If it does not already exist,
        # assume that the controller, cyber_network and rule synthesis need to be triggered.
        self.conf_path = self.conf_root + str(self) + "/"
        if not os.path.exists(self.conf_path):
            os.makedirs(self.conf_path)
            self.load_config = False
            self.save_config = True
        else:
            self.load_config = False
            self.save_config = True

        self.h = httplib2.Http()
        self.controller_api_base_url = controller_api_base_url
        self.h.add_credentials(controller_api_user_name,
                               controller_api_password)

    def __str__(self):
        return self.controller + "_" + str(self.synthesis) + "_" + str(
            self.topo)

    def __del__(self):
        self.cm.stop_controller()
        self.cleanup_mininet()

    def init_topo(self):
        if self.topo_name == "ring":
            self.topo = RingTopo(self.topo_params)
            self.nc_topo_str = "Ring topology with " + str(
                self.topo.total_switches) + " switches"
        elif self.topo_name == "clostopo":
            self.topo = ClosTopo(self.topo_params)
            self.nc_topo_str = "Clos topology with " + str(
                self.topo.total_switches) + " switches"
        elif self.topo_name == "linear":
            self.topo = LinearTopo(self.topo_params)
            self.nc_topo_str = "Linear topology with " + str(
                self.topo_params["num_switches"]) + " switches"
        elif self.topo_name == "clique":
            self.topo = CliqueTopo(self.topo_params)
            self.nc_topo_str = "Linear topology with " + str(
                self.topo_params["num_switches"]) + " switches"
        elif self.topo_name == "clique_enterprise":
            self.topo = CliqueEnterpriseTopo(self.topo_params)
            self.nc_topo_str = "Clique Enterprise topology with " + str(
                self.topo_params["num_switches"]) + " switches"
        else:

            raise NotImplementedError("Topology: %s" % self.topo_name)

    def init_synthesis(self):
        if self.synthesis_name == "DijkstraSynthesis":
            self.synthesis_params["master_switch"] = self.topo_name == "linear"
            self.synthesis = DijkstraSynthesis(self.synthesis_params)

        elif self.synthesis_name == "AboresceneSynthesis":
            self.synthesis = AboresceneSynthesis(self.synthesis_params)
        elif self.synthesis_name == "SimpleMACSynthesis":
            self.synthesis = SimpleMACSynthesis(self.synthesis_params)
        else:
            self.synthesis = None

    def prepare_all_flow_specifications(self):

        flow_specs = []

        flow_match = Match(is_wildcard=True)
        #flow_match["ethernet_type"] = 0x0800

        for src_host_id, dst_host_id in permutations(self.ng.host_ids, 2):

            if src_host_id == dst_host_id:
                continue

            fs = FlowSpecification(src_host_id, dst_host_id, flow_match)
            fs.ng_src_host = self.ng.get_node_object(src_host_id)
            fs.ng_dst_host = self.ng.get_node_object(dst_host_id)
            fs.mn_src_host = self.mininet_obj.get(src_host_id)
            fs.mn_dst_host = self.mininet_obj.get(dst_host_id)

            flow_specs.append(fs)

        return flow_specs

    def trigger_synthesis(self, synthesis_setup_gap):

        if self.synthesis_name == "DijkstraSynthesis":
            self.synthesis.network_graph = self.ng
            self.synthesis.synthesis_lib = SynthesisLib(
                "localhost", "8181", self.ng)
            self.synthesis.synthesize_all_node_pairs()

        elif self.synthesis_name == "AboresceneSynthesis":
            self.synthesis.network_graph = self.ng
            self.synthesis.synthesis_lib = SynthesisLib(
                "localhost", "8181", self.ng)
            flow_match = Match(is_wildcard=True)
            flow_match["ethernet_type"] = 0x0800
            self.synthesis.synthesize_all_switches(flow_match, 2)

        elif self.synthesis_name == "SimpleMACSynthesis":
            self.synthesis.network_graph = self.ng
            self.synthesis.synthesis_lib = SynthesisLib(
                "localhost", "8181", self.ng)
            flow_specs = self.prepare_all_flow_specifications()
            self.synthesis.synthesize_flow_specifications(flow_specs)

        if synthesis_setup_gap:
            time.sleep(synthesis_setup_gap)

        if self.mininet_obj:
            #self.mininet_obj.pingAll()

            # full_data = self.mininet_obj.pingFull(hosts=[self.mininet_obj.get('h1'),
            #                                              self.mininet_obj.get('h2')])
            # print full_data
            """
            h1 = self.mininet_obj.get('h1')
            h2 = self.mininet_obj.get('h2')

            s1 = self.mininet_obj.get('s1')

            cmd = "ping -c3 " + h2.IP()
            output = h1.cmd(cmd)

            macAddr = os.popen("ifconfig -a s1-eth1 | grep HWaddr | awk -F \' \' \'{print $5}\'").read().rstrip('\n')
            #macAddr = str(proc.stdout.read())
            os.system("sudo tcprewrite --enet-smac=" + str(macAddr) + " --infile=/home/ubuntu/Desktop/Workspace/NetPower_TestBed/test.pcap --outfile=/home/ubuntu/Desktop/Workspace/NetPower_TestBed/test2.pcap")

            cmd = "sudo tcpreplay -i s1-eth1 /home/ubuntu/Desktop/Workspace/NetPower_TestBed/test2.pcap"
            os.system(cmd)
            #output = h1.cmd(cmd)

            print "here"
            """

    def get_ryu_switches(self):
        ryu_switches = {}

        # Get all the ryu_switches from the inventory API
        remaining_url = 'stats/switches'
        resp, content = self.h.request(
            self.controller_api_base_url + remaining_url, "GET")

        #CLI(self.mininet_obj)

        #import pdb; pdb.set_trace()

        ryu_switch_numbers = json.loads(content)

        for dpid in ryu_switch_numbers:

            this_ryu_switch = {}

            # Get the flows
            remaining_url = 'stats/flow' + "/" + str(dpid)
            resp, content = self.h.request(
                self.controller_api_base_url + remaining_url, "GET")

            if resp["status"] == "200":
                switch_flows = json.loads(content)
                switch_flow_tables = defaultdict(list)
                for flow_rule in switch_flows[str(dpid)]:
                    switch_flow_tables[flow_rule["table_id"]].append(flow_rule)
                this_ryu_switch["flow_tables"] = switch_flow_tables
            else:
                print "Error pulling switch flows from RYU."

            # Get the ports
            remaining_url = 'stats/portdesc' + "/" + str(dpid)
            resp, content = self.h.request(
                self.controller_api_base_url + remaining_url, "GET")

            if resp["status"] == "200":
                switch_ports = json.loads(content)
                this_ryu_switch["ports"] = switch_ports[str(dpid)]
            else:
                print "Error pulling switch ports from RYU."

            # Get the groups
            remaining_url = 'stats/groupdesc' + "/" + str(dpid)
            resp, content = self.h.request(
                self.controller_api_base_url + remaining_url, "GET")

            if resp["status"] == "200":
                switch_groups = json.loads(content)
                this_ryu_switch["groups"] = switch_groups[str(dpid)]
            else:
                print "Error pulling switch ports from RYU."

            ryu_switches[dpid] = this_ryu_switch

        with open(self.conf_path + "ryu_switches.json", "w") as outfile:
            json.dump(ryu_switches, outfile)

    def get_onos_switches(self):

        # Get all the onos_switches from the inventory API
        remaining_url = 'devices'
        resp, content = self.h.request(
            self.controller_api_base_url + remaining_url, "GET")

        onos_switches = json.loads(content)

        for this_switch in onos_switches["devices"]:

            # Get the flows
            remaining_url = 'flows' + "/" + this_switch["id"]
            resp, content = self.h.request(
                self.controller_api_base_url + remaining_url, "GET")

            if resp["status"] == "200":
                switch_flows = json.loads(content)
                switch_flow_tables = defaultdict(list)
                for flow_rule in switch_flows["flows"]:
                    switch_flow_tables[flow_rule["tableId"]].append(flow_rule)
                this_switch["flow_tables"] = switch_flow_tables
            else:
                print "Error pulling switch flows from Onos."

            # Get the ports

            remaining_url = "links?device=" + this_switch["id"]
            resp, content = self.h.request(
                self.controller_api_base_url + remaining_url, "GET")

            if resp["status"] == "200":
                switch_links = json.loads(content)["links"]
                this_switch["ports"] = {}
                for link in switch_links:
                    if link["src"]["device"] == this_switch["id"]:
                        this_switch["ports"][link["src"]["port"]] = link["src"]
                    elif link["dst"]["device"] == this_switch["id"]:
                        this_switch["ports"][link["dst"]["port"]] = link["dst"]
            else:
                print "Error pulling switch ports from RYU."

            # Get the groups
            remaining_url = 'groups' + "/" + this_switch["id"]
            resp, content = self.h.request(
                self.controller_api_base_url + remaining_url, "GET")

            if resp["status"] == "200":
                this_switch["groups"] = json.loads(content)["groups"]
            else:
                print "Error pulling switch ports from RYU."

        with open(self.conf_path + "onos_switches.json", "w") as outfile:
            json.dump(onos_switches, outfile)

    def get_mininet_host_nodes(self):

        mininet_host_nodes = {}

        for sw in self.topo.switches():
            mininet_host_nodes[sw] = []
            for h in self.get_all_switch_hosts(sw):
                mininet_host_dict = {
                    "host_switch_id": "s" + sw[1:],
                    "host_name": h.name,
                    "host_IP": h.IP(),
                    "host_MAC": h.MAC()
                }

                mininet_host_nodes[sw].append(mininet_host_dict)

        with open(self.conf_path + "mininet_host_nodes.json", "w") as outfile:
            json.dump(mininet_host_nodes, outfile)

        return mininet_host_nodes

    def get_onos_host_nodes(self):

        # Get all the onos_hosts from the inventory API
        remaining_url = 'hosts'
        resp, content = self.h.request(
            self.controller_api_base_url + remaining_url, "GET")

        onos_hosts = json.loads(content)["hosts"]

        with open(self.conf_path + "onos_hosts.json", "w") as outfile:
            json.dump(onos_hosts, outfile)

        return onos_hosts

    def get_host_nodes(self):
        if self.controller == "ryu":
            self.get_mininet_host_nodes()
        elif self.controller == "onos":
            self.get_onos_host_nodes()
        else:
            raise NotImplementedError

    def get_mininet_links(self):

        mininet_port_links = {}

        with open(self.conf_path + "mininet_port_links.json", "w") as outfile:
            json.dump(self.topo.ports, outfile)

        return mininet_port_links

    def get_onos_links(self):
        # Get all the onos_links from the inventory API
        remaining_url = 'links'
        resp, content = self.h.request(
            self.controller_api_base_url + remaining_url, "GET")

        onos_links = json.loads(content)["links"]

        with open(self.conf_path + "onos_links.json", "w") as outfile:
            json.dump(onos_links, outfile)

        return onos_links

    def get_links(self):
        if self.controller == "ryu":
            self.get_mininet_links()
        elif self.controller == "onos":
            self.get_onos_links()
        else:
            raise NotImplementedError

    def get_switches(self):
        # Now the output of synthesis is carted away
        if self.controller == "ryu":
            self.get_ryu_switches()
        elif self.controller == "onos":
            self.get_onos_switches()
        else:
            raise NotImplementedError

    def setup_network_graph(self,
                            mininet_setup_gap=None,
                            synthesis_setup_gap=None):

        if not self.load_config and self.save_config:

            if self.controller == "ryu":

                self.cm = ControllerMan(controller=self.controller)
                self.cm.start_controller()

                #time.sleep(mininet_setup_gap)
                self.start_mininet()
                if mininet_setup_gap:
                    time.sleep(mininet_setup_gap)

            # These are needed by the network graph...
            self.get_switches()
            self.get_host_nodes()
            self.get_links()

            self.ng = NetworkGraph(network_configuration=self)
            self.ng.parse_network_graph()

            if self.synthesis_name:

                # Now the synthesis...
                self.trigger_synthesis(synthesis_setup_gap)

                # Refresh just the switches in the network graph, post synthesis
                self.get_switches()
                self.ng.parse_network_graph()
                #self.ng.parse_switches()

        else:
            self.ng = NetworkGraph(network_configuration=self)
            self.ng.parse_network_graph()

        print "total_flow_rules:", self.ng.total_flow_rules

        return self.ng

    def start_mininet(self):

        self.cleanup_mininet()

        if self.controller == "ryu":
            self.mininet_obj = Mininet(
                topo=self.topo,
                cleanup=True,
                autoStaticArp=True,
                link=TCLink,
                controller=lambda name: RemoteController(
                    name, ip=self.controller_ip, port=self.controller_port),
                switch=partial(OVSSwitch, protocols='OpenFlow13'))

            #self.set_switch_netdevice_owners()

            self.mininet_obj.start()

    def cleanup_mininet(self):

        if self.mininet_obj:
            print "Mininet cleanup..."
            #self.mininet_obj.stop()

        os.system("sudo mn -c")

    def get_all_switch_hosts(self, switch_id):

        p = self.topo.ports

        for node in p:

            # Only look for this switch's hosts
            if node != switch_id:
                continue

            for switch_port in p[node]:
                dst_list = p[node][switch_port]
                dst_node = dst_list[0]
                if dst_node.startswith("h"):
                    yield self.mininet_obj.get(dst_node)

    def get_mininet_hosts_obj(self):
        for sw in self.topo.switches():
            for h in self.get_all_switch_hosts(sw):
                yield h

    def is_host_pair_pingable(self, src_host, dst_host):
        hosts = [src_host, dst_host]
        ping_loss_rate = self.mininet_obj.ping(hosts, '1')

        # If some packets get through, then declare pingable
        if ping_loss_rate < 100.0:
            return True
        else:
            # If not, do a double check:
            cmd_output = src_host.cmd("ping -c 3 " + dst_host.IP())
            print cmd_output
            if cmd_output.find("0 received") != -1:
                return False
            else:
                return True

    def are_all_hosts_pingable(self):
        ping_loss_rate = self.mininet_obj.pingAll('1')

        # If some packets get through, then declare pingable
        if ping_loss_rate < 100.0:
            return True
        else:
            return False

    def get_intf_status(self, ifname):

        # set some symbolic constants
        SIOCGIFFLAGS = 0x8913
        null256 = '\0' * 256

        # create a socket so we have a handle to query
        s = socket(AF_INET, SOCK_DGRAM)

        # call ioctl() to get the flags for the given interface
        result = fcntl.ioctl(s.fileno(), SIOCGIFFLAGS, ifname + null256)

        # extract the interface's flags from the return value
        flags, = struct.unpack('H', result[16:18])

        # check "UP" bit and print a message
        up = flags & 1

        return ('down', 'up')[up]

    def wait_until_link_status(self, sw_i, sw_j, intended_status):

        num_seconds = 0

        for link in self.mininet_obj.links:
            if (sw_i in link.intf1.name and sw_j in link.intf2.name) or (
                    sw_i in link.intf2.name and sw_j in link.intf1.name):

                while True:
                    status_i = self.get_intf_status(link.intf1.name)
                    status_j = self.get_intf_status(link.intf2.name)

                    if status_i == intended_status and status_j == intended_status:
                        break

                    time.sleep(1)
                    num_seconds += 1

        return num_seconds

    def is_bi_connected_manual_ping_test(self,
                                         experiment_host_pairs_to_check,
                                         edges_to_try=None):

        is_bi_connected = True

        if not edges_to_try:
            edges_to_try = self.topo.g.edges()

        for edge in edges_to_try:

            # Only try and break switch-switch edges
            if edge[0].startswith("h") or edge[1].startswith("h"):
                continue

            for (src_host, dst_host) in experiment_host_pairs_to_check:

                is_pingable_before_failure = self.is_host_pair_pingable(
                    src_host, dst_host)

                if not is_pingable_before_failure:
                    print "src_host:", src_host, "dst_host:", dst_host, "are not connected."
                    is_bi_connected = False
                    break

                self.mininet_obj.configLinkStatus(edge[0], edge[1], 'down')
                self.wait_until_link_status(edge[0], edge[1], 'down')
                time.sleep(5)
                is_pingable_after_failure = self.is_host_pair_pingable(
                    src_host, dst_host)
                self.mininet_obj.configLinkStatus(edge[0], edge[1], 'up')
                self.wait_until_link_status(edge[0], edge[1], 'up')

                time.sleep(5)
                is_pingable_after_restoration = self.is_host_pair_pingable(
                    src_host, dst_host)

                if not is_pingable_after_failure:
                    is_bi_connected = False
                    print "Got a problem with edge:", edge, " for src_host:", src_host, "dst_host:", dst_host
                    break

        return is_bi_connected

    def is_bi_connected_manual_ping_test_all_hosts(self, edges_to_try=None):

        is_bi_connected = True

        if not edges_to_try:
            edges_to_try = self.topo.g.edges()

        for edge in edges_to_try:

            # Only try and break switch-switch edges
            if edge[0].startswith("h") or edge[1].startswith("h"):
                continue

            is_pingable_before_failure = self.are_all_hosts_pingable()

            if not is_pingable_before_failure:
                is_bi_connected = False
                break

            self.mininet_obj.configLinkStatus(edge[0], edge[1], 'down')
            self.wait_until_link_status(edge[0], edge[1], 'down')
            time.sleep(5)
            is_pingable_after_failure = self.are_all_hosts_pingable()
            self.mininet_obj.configLinkStatus(edge[0], edge[1], 'up')
            self.wait_until_link_status(edge[0], edge[1], 'up')

            time.sleep(5)
            is_pingable_after_restoration = self.are_all_hosts_pingable()

            if not is_pingable_after_failure:
                is_bi_connected = False
                break

        return is_bi_connected

    def parse_iperf_output(self, iperf_output_string):
        data_lines = iperf_output_string.split('\r\n')
        interesting_line_index = None
        for i in xrange(len(data_lines)):
            if data_lines[i].endswith('Server Report:'):
                interesting_line_index = i + 1
        data_tokens = data_lines[interesting_line_index].split()
        print "Transferred Rate:", data_tokens[7]
        print "Jitter:", data_tokens[9]

    def parse_ping_output(self, ping_output_string):

        data_lines = ping_output_string.split('\r\n')
        interesting_line_index = None
        for i in xrange(len(data_lines)):
            if data_lines[i].startswith('5 packets transmitted'):
                interesting_line_index = i + 1
        data_tokens = data_lines[interesting_line_index].split()
        data_tokens = data_tokens[3].split('/')
        print 'Min Delay:', data_tokens[0]
        print 'Avg Delay:', data_tokens[1]
        print 'Max Delay:', data_tokens[2]

    def set_netdevice_owner_in_timekeeper(self, intfNames, pid):
        for name in intfNames:
            if name != "lo":
                print "Setting net-device owner for ", name
                set_netdevice_owner(pid, name)

    def set_switch_netdevice_owners(self):
        # import pdb; pdb.set_trace()  # leftover debugging breakpoint (disabled)
        for i in xrange(0, len(self.mininet_obj.switches)):
            mininet_switch = self.mininet_obj.switches[i]
            # set netdevices owner
            self.set_netdevice_owner_in_timekeeper(mininet_switch.intfNames(),
                                                   mininet_switch.pid)
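
A hypothetical NetworkConfiguration instantiation matching the constructor above; all values are placeholders, and topo_name / synthesis_name must be among those handled by init_topo() and init_synthesis().

# Sketch only -- placeholder values; the REST base URL and credentials depend on the local Ryu setup.
nc = NetworkConfiguration(controller="ryu",
                          controller_ip="127.0.0.1",
                          controller_port=6633,
                          controller_api_base_url="http://localhost:8080/",
                          controller_api_user_name="admin",
                          controller_api_password="admin",
                          topo_name="ring",
                          topo_params={"num_switches": 4, "num_hosts_per_switch": 1},   # assumed keys
                          conf_root="configurations/",
                          synthesis_name="SimpleMACSynthesis",
                          synthesis_params={},
                          roles=[])
print str(nc)   # controller_synthesis_topo string; also used to name the config directory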
Example 6
class QosDemo(Experiment):

    def __init__(self,
                 num_iterations,
                 network_configurations,
                 num_measurements):

        super(QosDemo, self).__init__("number_of_hosts", num_iterations)
        self.network_configurations = network_configurations
        self.num_measurements = num_measurements

        self.cm = ControllerMan(controller="ryu")
        self.cm.stop_controller()
        time.sleep(5)
        self.controller_port = self.cm.start_controller()

        self.data = {
            "Throughput": defaultdict(defaultdict),
            "99th Percentile Latency": defaultdict(defaultdict),
            "Maximum Latency": defaultdict(defaultdict)
        }

    def trigger(self):

        for nc in self.network_configurations:
            print "network_configuration:", nc

            nc.setup_network_graph(mininet_setup_gap=1, synthesis_setup_gap=1)
            nc.init_flow_specs()

            # mhasan: MCP code probably will go there
            # nc.calculate_path_by_mcp()
            # mcp_helper = MCP_Helper(nc, 100)

            #path = mcph.find_path_by_mcp(nc)
            mcph.find_path_by_mcp(nc)
            '''
            if not path:
                print "No path found!"
            else:
                print path
            '''

            # mcp_helper.random_print()
            # mcp_helper.calculate_path_by_mcp()

            nc.synthesis.synthesize_flow_specifications(nc.flow_specs)
            self.measure_flow_rates(nc)

        print "here"

    def parse_iperf_output(self, iperf_output_string):
        data_lines = iperf_output_string.split('\r\n')
        interesting_line_index = None
        for i in xrange(len(data_lines)):
            if data_lines[i].endswith('Server Report:'):
                interesting_line_index = i + 1
        data_tokens = data_lines[interesting_line_index].split()
        print "Transferred Rate:", data_tokens[7]
        print "Jitter:", data_tokens[9]

    def parse_ping_output(self, ping_output_string):

        data_lines = ping_output_string.split('\r\n')
        interesting_line_index = None
        for i in xrange(len(data_lines)):
            if data_lines[i].startswith('5 packets transmitted'):
                interesting_line_index = i + 1
        data_tokens = data_lines[interesting_line_index].split()
        data_tokens = data_tokens[3].split('/')
        print 'Min Delay:', data_tokens[0]
        print 'Avg Delay:', data_tokens[1]
        print 'Max Delay:', data_tokens[2]

    def measure_flow_rates(self, nc):

        for i in range(self.num_iterations):
            print "iteration:", i + 1

            for j in range(self.num_measurements):

                max_fs_duration = 0

                for fs in nc.flow_specs:

                    if not fs.measurement_rates:
                        continue

                    server_output = fs.mn_dst_host.cmd("/usr/local/bin/netserver")
                    client_output = fs.mn_src_host.cmd(fs.construct_netperf_cmd_str(fs.measurement_rates[j]))

                    if fs.tests_duration > max_fs_duration:
                        max_fs_duration = fs.tests_duration

                # Sleep for 5 seconds more than flow duration to make sure netperf has finished.
                time.sleep(max_fs_duration + 5)

                for fs in nc.flow_specs:

                    if not fs.measurement_rates:
                        continue

                    fs.measurements[fs.measurement_rates[j]].append(fs.parse_measurements(fs.mn_src_host.read()))
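
A self-contained check of the parse_ping_output() logic above, run against a hypothetical iputils-style ping transcript; the CRLF join mimics the line endings returned by a Mininet host.cmd() call.

# Fabricated sample transcript -- only the two summary lines matter to the parser.
sample_ping_output = '\r\n'.join([
    'PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.',
    '64 bytes from 10.0.0.2: icmp_seq=1 ttl=64 time=0.061 ms',
    '--- 10.0.0.2 ping statistics ---',
    '5 packets transmitted, 5 received, 0% packet loss, time 4002ms',
    'rtt min/avg/max/mdev = 0.045/0.053/0.077/0.011 ms',
])

# Same steps as parse_ping_output(), which does not use self.
data_lines = sample_ping_output.split('\r\n')
interesting_line_index = None
for i in xrange(len(data_lines)):
    if data_lines[i].startswith('5 packets transmitted'):
        interesting_line_index = i + 1
min_avg_max = data_lines[interesting_line_index].split()[3].split('/')
print 'Min Delay:', min_avg_max[0]   # 0.045
print 'Avg Delay:', min_avg_max[1]   # 0.053
print 'Max Delay:', min_avg_max[2]   # 0.077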
Example 7
class QosDemo(Experiment):

    def __init__(self,
                 num_iterations,
                 network_configurations,
                 num_measurements,
                 measurement_rates,
                 number_of_test_cases,
                 number_of_RT_flow_list,
                 number_of_BE_flow_list,
                 base_delay_budget,
                 link_delay_upper_bound):

        super(QosDemo, self).__init__("number_of_hosts", num_iterations)
        self.network_configurations = network_configurations
        self.num_measurements = num_measurements

        self.measurement_rates = measurement_rates

        self.number_of_test_cases = number_of_test_cases
        self.number_of_RT_flow_list = number_of_RT_flow_list
        self.number_of_BE_flow_list = number_of_BE_flow_list

        self.base_delay_budget = base_delay_budget
        self.link_delay_upper_bound = link_delay_upper_bound

        self.cm = ControllerMan(controller="ryu")
        self.cm.stop_controller()
        time.sleep(5)
        self.controller_port = self.cm.start_controller()

        self.data = {
            "Throughput": defaultdict(defaultdict),
            "99th Percentile Latency": defaultdict(defaultdict),
            "Maximum Latency": defaultdict(defaultdict)
        }

    def trigger(self):

        for nc in self.network_configurations:
            print "network_configuration:", nc

            nc.setup_network_graph(mininet_setup_gap=1, synthesis_setup_gap=1)
            nc.init_flow_specs()
            nc.calibrate_delay(self.base_delay_budget)

            # mhasan: MCP code will go there
            # mcph.print_delay_budget(nc)

            mcph.find_path_by_mcp(nc)  # updates the 'path' attribute of each FlowSpecification

            if not mcph.test_all_flow_is_schedulable(nc):
                print "Network configuration is NOT feasible (no path found)!"
                nc.isFeasible = False
                continue

            # mcph.print_path(nc)
            # with only RT_flows
            # mcph.synthesize_flow_specifications(nc)

            # uses the default queue (no delay guarantee)
            #mcph.synthesize_flow_specifications_default_queue(nc)

            # Synthesize flows (may include both RT and BE flows)
            mcph.synthesize_flow_specifications_with_best_effort(nc)

            # nc.mininet_obj.pingAll('1')
            self.measure_flow_rates(nc)

        self.parse_flow_measurement_output()
        print "Experiment Done!"

    def parse_iperf_output(self, iperf_output_string):
        data_lines = iperf_output_string.split('\r\n')
        interesting_line_index = None
        for i in xrange(len(data_lines)):
            if data_lines[i].endswith('Server Report:'):
                interesting_line_index = i + 1
        data_tokens = data_lines[interesting_line_index].split()
        print "Transferred Rate:", data_tokens[7]
        print "Jitter:", data_tokens[9]

    def parse_ping_output(self, ping_output_string):

        data_lines = ping_output_string.split('\r\n')
        interesting_line_index = None
        for i in xrange(len(data_lines)):
            if data_lines[i].startswith('5 packets transmitted'):
                interesting_line_index = i + 1
        data_tokens = data_lines[interesting_line_index].split()
        data_tokens = data_tokens[3].split('/')
        print 'Min Delay:', data_tokens[0]
        print 'Avg Delay:', data_tokens[1]
        print 'Max Delay:', data_tokens[2]

    def measure_flow_rates(self, nc):

        for i in range(self.num_iterations):
            print "iteration:", i + 1

            for j in range(self.num_measurements):

                max_fs_duration = 0
                os.system('killall netserver')  # kill any previously started netserver instances

                for fs in nc.flow_specs:

                    if not fs.measurement_rates:
                        continue

                    server_command = "/usr/local/bin/netserver"
                    server_output = fs.mn_dst_host.cmd(server_command)
                    client_output = fs.mn_src_host.cmd(fs.construct_netperf_cmd_str(fs.measurement_rates[j]))

                    if fs.tests_duration > max_fs_duration:
                        max_fs_duration = fs.tests_duration

                # Sleep for 5 seconds more than flow duration to make sure netperf has finished.
                time.sleep(max_fs_duration + 5)

                fcnt = 0  # a counter to print flow number
                for fs in nc.flow_specs:

                    if not fs.measurement_rates:
                        continue

                    fcnt += 1

                    signal.signal(signal.SIGALRM, timeout)
                    # see whether there is any output from netperf
                    signal.alarm(10)

                    print "Running for flow-id: {}".format(fcnt)
                    try:
                        netperf_output_string = fs.mn_src_host.read()
                    except MyTimeOutException:
                        print "==== Timeout while reading netperf output. Aborting... ===="
                        continue
                    else:
                        # disable alarm
                        signal.alarm(0)
                        signal.signal(signal.SIGALRM, signal.SIG_DFL)

                        print "Delay Budget (e2e):{} microsecond, Max possible delay (round-trip):{} microsecond.".format(
                            fs.delay_budget*1000*1000,  # in us
                            # link_delay_upper bound in us
                            self.link_delay_upper_bound * nx.diameter(nc.ng.get_node_graph()) * 2)


                        print "=== netperf output: [Flow {} to {} ({}), Rate: {}, Test ID: {}, #of RT-Flow: {}, #of BE-Flow: {}, Flow ID: {}] ===".format(
                            fs.src_host_id, fs.dst_host_id, fs.tag,
                            fs.measurement_rates[j], nc.test_case_id, nc.number_of_RT_flows, nc.number_of_BE_flows,
                            fcnt)

                        print netperf_output_string

                        try:
                            parsed_measurement = fs.parse_measurements(netperf_output_string)
                        except StandardError:
                            print "Invalid result from netperf. Unable to parse Flow ID {}!".format(fcnt)
                            fs.measurements[fs.measurement_rates[j]].append(fs.get_null_measurement())
                        else:
                            fs.measurements[fs.measurement_rates[j]].append(parsed_measurement)
                            #print "saving netperf results!"

    def parse_flow_measurement_output(self):

        output_data_list = []  # maximum latency in any iteration for each of the flows

        for nc in self.network_configurations:

            # checking whether we have any solution for the network configuration
            if not nc.isFeasible:
                continue

            for j in range(self.num_measurements):

                max_mean_latency = 0
                max_max_latency = 0
                max_nn_latency = 0
                min_throughput = float('inf')
                max_delay_budget = 0
                min_delay_budget = float('inf')

                for fs in nc.flow_specs:
                    if not fs.measurement_rates:
                        continue

                    # consider only measurements from real-time flows
                    if fs.tag == "best-effort":
                        # print "best-effort, ignoring....."
                        continue

                    tmp = fs.measurements[fs.measurement_rates[j]]

                    mean_latency_list = [d['mean_latency'] for d in tmp if 'mean_latency' in d]
                    max_latency_list = [d['max_latency'] for d in tmp if 'max_latency' in d]
                    nn_latency_list = [d['nn_perc_latency'] for d in tmp if 'nn_perc_latency' in d]
                    min_throughput_list = [d['throughput'] for d in tmp if 'throughput' in d]

                    # keep only non-negative values
                    mean_latency_list = [x for x in mean_latency_list if x >= 0]
                    max_latency_list = [x for x in max_latency_list if x >= 0]
                    nn_latency_list = [x for x in nn_latency_list if x >= 0]
                    min_throughput_list = [x for x in min_throughput_list if x >= 0]

                    # max_mean_latency_iter = max(mean_latency_list)
                    # max_max_latency_iter = max(max_latency_list)  # saves the max of maximum latency
                    # max_nn_latency_iter = max(nn_latency_list)  # 99P latency
                    # min_throughput_iter = min(min_throughput_list)  # saves minimum throughput

                    max_mean_latency_iter = np.mean(np.array(mean_latency_list).astype(np.float))
                    max_max_latency_iter = np.mean(np.array(max_latency_list).astype(np.float))  # mean of per-iteration maximum latency
                    max_nn_latency_iter = np.mean(np.array(nn_latency_list).astype(np.float))  # mean 99th percentile latency
                    min_throughput_iter = np.mean(np.array(min_throughput_list).astype(np.float))  # mean throughput (minimum across flows taken below)

                    if max_mean_latency_iter > max_mean_latency:
                        max_mean_latency = max_mean_latency_iter

                    if max_max_latency_iter > max_max_latency:
                        max_max_latency = max_max_latency_iter

                    if max_nn_latency_iter > max_nn_latency:
                        max_nn_latency = max_nn_latency_iter

                    if min_throughput_iter < min_throughput:
                        min_throughput = min_throughput_iter

                    if fs.delay_budget > max_delay_budget:
                        max_delay_budget = fs.delay_budget

                    if fs.delay_budget < min_delay_budget:
                        min_delay_budget = fs.delay_budget

                diameter = nx.diameter(nc.ng.get_node_graph())
                max_possible_delay = self.link_delay_upper_bound * diameter
                max_possible_delay *= 1000  # convert to microsecond (since netperf output in microsecond)
                max_bw_req = max(self.measurement_rates)

                val_dict = {"number_of_RT_flows": nc.number_of_RT_flows,
                            "number_of_BE_flows": nc.number_of_BE_flows,
                            "max_possible_delay_e2e": max_possible_delay,  # this is end-to-end (NOT round-trip)
                            "measurement_rates": self.measurement_rates[j],
                            "max_mean_latency": float(max_mean_latency),
                            "max_max_latency": float(max_max_latency),
                            "max_nn_latency": float(max_nn_latency),
                            "min_throughput": float(min_throughput),
                            "max_delay_budget_e2e": max_delay_budget * 1000000,  # in microsecond
                            "min_delay_budget_e2e": min_delay_budget * 1000000,  # in microsecond
                            "max_bw_req": max_bw_req}

                output_data_list.append(val_dict)


        #print output_data_list
        # save data to workspace for plotting
        print "Writing data as pickle object..."
        with open('objs.pickle', 'w') as f:
            pickle.dump([self.number_of_RT_flow_list,
                         self.number_of_BE_flow_list,
                         self.number_of_test_cases,
                         self.measurement_rates,
                         self.base_delay_budget,
                         output_data_list], f)
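
A companion sketch for loading objs.pickle back for plotting; the unpack order mirrors the pickle.dump() list above, and the dictionary keys come from val_dict in parse_flow_measurement_output().

import pickle

# Assumes objs.pickle was produced by the code above, in the same environment.
with open('objs.pickle') as f:
    (number_of_RT_flow_list,
     number_of_BE_flow_list,
     number_of_test_cases,
     measurement_rates,
     base_delay_budget,
     output_data_list) = pickle.load(f)

for entry in output_data_list:
    print entry["number_of_RT_flows"], entry["measurement_rates"], entry["max_max_latency"]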
Example 8
class QosDemo(Experiment):

    def __init__(self,
                 num_iterations,
                 network_configurations,
                 num_measurements,
                 measurement_rates,
                 number_of_test_cases,
                 number_of_RT_flow_list,
                 number_of_BE_flow_list,
                 base_delay_budget_list,
                 link_delay_upper_bound):

        super(QosDemo, self).__init__("number_of_hosts", num_iterations)
        self.network_configurations = network_configurations
        self.num_measurements = num_measurements

        self.measurement_rates = measurement_rates

        self.number_of_test_cases = number_of_test_cases
        self.number_of_RT_flow_list = number_of_RT_flow_list
        self.number_of_BE_flow_list = number_of_BE_flow_list

        self.base_delay_budget_list = base_delay_budget_list
        self.link_delay_upper_bound = link_delay_upper_bound

        self.cm = ControllerMan(controller="ryu")
        self.cm.stop_controller()
        time.sleep(5)
        self.controller_port = self.cm.start_controller()

        self.data = {
            "Throughput": defaultdict(defaultdict),
            "99th Percentile Latency": defaultdict(defaultdict),
            "Maximum Latency": defaultdict(defaultdict)
        }

        self.shed_count = defaultdict(dict)

    def init_output_result_dictonary(self):

        for number_of_RT_flows in self.number_of_RT_flow_list:
            for delay_budget in self.base_delay_budget_list:
                self.shed_count[number_of_RT_flows][delay_budget] = 0

    def trigger(self):

        self.init_output_result_dictonary()
        time.sleep(5)  # give Ryu (or anything else still starting up) a moment to settle
        os.system("pkill ryu-manager")  # cleanup

        print "Experiment starting..."

        for nc in self.network_configurations:
            #print "network_configuration:", nc
            print "######################################"
            print "#of flow: {}, delay: {}, iteration: {}".format(nc.number_of_RT_flows,
                                                                  nc.min_delay_budget_for_all_flows, nc.test_case_id)
            print "######################################"

            #nc.setup_network_graph(mininet_setup_gap=1, synthesis_setup_gap=1)
            nc.setup_network_graph_without_mininet()

            # nc.init_flow_specs()
            nc.calibrate_delay(nc.min_delay_budget_for_all_flows)

            # mhasan: MCP code will go there
            # mcph.print_delay_budget(nc)

            mcph.find_path_by_mcp(nc)  # updates the 'path' attribute of each FlowSpecification

            if not mcph.test_all_flow_is_schedulable(nc):
                print "Network configuration is NOT feasible (no path found)!"
                nc.isFeasible = False
                #continue
            else:
                print "Network configuration is feasible (path found for all flows)!"
                nc.isFeasible = True

                # increase schedulability count
                self.shed_count[nc.number_of_RT_flows][nc.min_delay_budget_for_all_flows] += 1
                # nc.cleanup_mininet()
                # os.system("pkill mn")  # cleanup
                # os.system("pkill ryu-manager")  # cleanup


        self.write_data_to_file('schedulability_traces.pickle')
        #print self.shed_count
        print "Experiment Done!"

    def get_minimum_diameter(self):

        # return minimum diameter from all the network configurations
        mindia = float('Inf')
        for nc in self.network_configurations:
            if mindia > nc.network_diameter:
                mindia = nc.network_diameter
        return mindia

    def write_data_to_file(self, filename):

        # save data to workspace for plotting
        print "Writing data as pickle object..."
        with open(filename, 'w') as f:
            pickle.dump([self.number_of_RT_flow_list,
                         self.number_of_test_cases,
                         self.base_delay_budget_list,
                         self.shed_count,
                         self.get_minimum_diameter()], f)
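
A possible post-processing helper for the schedulability counts collected above; it is not part of the original class, and it assumes number_of_test_cases is the number of test cases generated per (RT-flow count, delay budget) pair, since shed_count is incremented once per feasible configuration.

    # Hypothetical helper (not in the original code): turn raw feasibility counts
    # into schedulability ratios for plotting.
    def print_schedulability_ratios(self):
        for number_of_RT_flows in self.number_of_RT_flow_list:
            for delay_budget in self.base_delay_budget_list:
                feasible = self.shed_count[number_of_RT_flows][delay_budget]
                ratio = float(feasible) / self.number_of_test_cases
                print "RT flows: {}, delay budget: {}, schedulability: {:.2f}".format(
                    number_of_RT_flows, delay_budget, ratio)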