Example #1
 def connect_to_switches(self):
     """Connects to all the switches in the topology and saves them
      in self.controllers.
     """
     for p4switch in self.topo.get_p4switches():
         thrift_port = self.topo.get_thrift_port(p4switch)
         self.controllers[p4switch] = SimpleSwitchAPI(thrift_port)
Example #2
 def __init__(self, sw_name):
     self.sw_name = sw_name
     self.topo = Topology(db='topology.db')
     self.thrift_port = self.topo.get_thrift_port(sw_name)
     self.controller = SimpleSwitchAPI(self.thrift_port)
     self.sub = None
     self.flag = 1
Example #3
    def __init__(self, sw_name):

        self.sw_name = sw_name
        self.topo = Topology(db="topology.db")
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)
Example #4
 def __init__(self):
     self.topo = Topology(db='topology.db')
     dic = self.topo.get_p4switches()
     self.sw_name = [sw for sw in dic.keys()]
     self.controller = {
         sw: SimpleSwitchAPI(self.topo.get_thrift_port(sw))
         for sw in self.sw_name
     }
     self.multicast_table = dict()
Example #5
class ReadCounters(object):
    def __init__(self, sw_name):

        self.topo = Topology(db="topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

    def direct(self):
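        # read one cell of the direct counter "direct_port_counter" for
        # every entry currently installed in "count_table"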

        entries = self.controller.table_num_entries("count_table")
        for i in range(int(entries)):
            self.controller.counter_read("direct_port_counter", i)

    def indirect(self):
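        # an indirect counter is addressed by an explicit index; here the
        # first five cells of "port_counter" are read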

        for i in range(5):
            self.controller.counter_read("port_counter", i)
Example #6
class DigestController():
    def __init__(self, sw_name):

        self.sw_name = sw_name
        self.topo = Topology(db="topology.db")
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

    def recv_msg_digest(self, msg):
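        # the notification starts with a 32-byte header (topic, device id,
        # context id, list id, buffer id and the number of samples); each
        # sample that follows is 9 bytes: a one-byte random number plus the
        # source and destination IPv4 addresses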

        topic, device_id, ctx_id, list_id, buffer_id, num = struct.unpack(
            "<iQiiQi", msg[:32])
        #print num, len(msg)
        offset = 9
        msg = msg[32:]
        for sub_message in range(num):
            random_num, src, dst = struct.unpack("!BII", msg[0:offset])
            print "random number:", random_num, "src ip:", str(
                ipaddress.IPv4Address(src)), "dst ip:", str(
                    ipaddress.IPv4Address(dst))
            self.controller.table_add("whitelist", "NoAction",
                                      [str(ipaddress.IPv4Address(src))])
            self.controller.table_add("whitelist", "NoAction",
                                      [str(ipaddress.IPv4Address(dst))])
            msg = msg[offset:]

        self.controller.client.bm_learning_ack_buffer(ctx_id, list_id,
                                                      buffer_id)

    def run_digest_loop(self):
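        # subscribe to the switch's nanomsg notification socket and hand
        # every received learning notification to recv_msg_digest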

        sub = nnpy.Socket(nnpy.AF_SP, nnpy.SUB)
        notifications_socket = self.controller.client.bm_mgmt_get_info(
        ).notifications_socket
        print "connecting to notification sub %s" % notifications_socket
        sub.connect(notifications_socket)
        sub.setsockopt(nnpy.SUB, nnpy.SUB_SUBSCRIBE, '')

        while True:
            msg = sub.recv()
            self.recv_msg_digest(msg)
Example #7
class ReadCounters(object):
	# initialize register reader
	def __init__(self, sw_name):
		self.topo = Topology(db="./topology.db")
		self.sw_name = sw_name
		self.thrift_port = self.topo.get_thrift_port(sw_name)
		self.controller = SimpleSwitchAPI(self.thrift_port)

	def get_qlen(self):
		qlens = []
		packets = []
		for i in range(1, 5):
			# controller.register_read reads a register cell at a specific index.
			# Here we read indices 1 to 4 because, in the P4 implementation,
			# the queue length and packet count of the i-th port are stored
			# in the register cell with index i
			packets.append(self.controller.register_read("in_port_packets", i))
			qlens.append(self.controller.register_read("in_port_qlen", i))
		return (packets, qlens)
Example #8
    def __init__(self, topo_db, sw_name, ip_controller, port_controller, log_dir, \
        monitoring=True, routing_file=None):

        self.topo = Topology(db=topo_db)
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)
        self.controller.reset_state()
        self.log_dir = log_dir

        print 'connecting to ', ip_controller, port_controller
        # Socket used to communicate with the controller
        self.sock_controller = socket.socket(socket.AF_INET,
                                             socket.SOCK_STREAM)
        server_address = (ip_controller, port_controller)
        self.sock_controller.connect(server_address)
        print 'Connected!'

        # Send the switch name to the controller
        self.sock_controller.sendall(str(sw_name))

        self.make_logging()

        if monitoring:
            # Monitoring scheduler
            self.t_sched = sched_timer.RepeatingTimer(10, 0.5, self.scheduling)
            self.t_sched.start()

        self.mapping_dic = {}
        tmp = list(self.topo.get_hosts()) + list(self.topo.get_p4switches())
        self.mapping_dic = {k: v for v, k in enumerate(tmp)}
        self.log.info(str(self.mapping_dic))

        self.routing_file = routing_file
        print 'routing_file ', routing_file
        if self.routing_file is not None:
            json_data = open(self.routing_file)
            self.topo_routing = json.load(json_data)
Example #9
class ReadCounters(object):
    # initialize register reader
    def __init__(self, sw_name):
        self.topo = Topology(db="./topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

    def get_qlen(self):
        packets = []

        data = getRegisterNameWidth()
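        # getRegisterNameWidth() is defined elsewhere in this example's
        # project; judging from its use here, it returns the register name
        # and the number of indices to read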
        registerWidth = int(data[1])
        registerName = data[0]

        for i in range(0, registerWidth):
            packets.append(self.controller.register_read(registerName, i))
        return (packets)
Example #10
class DigestController():

    def __init__(self, sw_name):

        self.sw_name = sw_name
        self.topo = Topology(db="topology.db")
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

    def recv_msg_digest(self, msg):

        topic, device_id, ctx_id, list_id, buffer_id, num = struct.unpack("<iQiiQi",
                                                                     msg[:32])
        #print num, len(msg)
        offset = 17  # number of bytes per digest sub-message
        msg = msg[32:]
        for sub_message in range(num):
            #msg_type, src, dst = struct.unpack("!BII", msg[0:offset])
            msg_type, arg1, arg2, arg3, arg4 = struct.unpack("!BIIII", msg[0:offset])
            if msg_type == 0:
                print "------------------------------------------------------------"
                print "This is a debug message --> action is executed successfully!"
                print "Message:", msg_type, "data", arg1, "extra:", arg2
                print "------------------------------------------------------------"
            elif msg_type == 1:
                print "message type:", msg_type, "src ip:", str(ipaddress.IPv4Address(arg1)), "dst ip:", str(ipaddress.IPv4Address(arg2))
                self.controller.table_add("whitelist", "NoAction", [str(ipaddress.IPv4Address(arg1))])
            elif msg_type == 2:
                print "message type:", msg_type, "connection is added with Hash:", str(arg1), "diff:", str(arg2)
                self.controller.table_add("connections", "saveDifferenceValue", [str(arg1)], [str(arg2)])
                print "message type:", msg_type, "connection is added with Hash:", str(arg3), "diff:", str(arg4)
                self.controller.table_add("connections", "saveDifferenceValue", [str(arg3)], [str(arg4)])
            else:
                print("Unknown message type!")
            msg = msg[offset:]


        self.controller.client.bm_learning_ack_buffer(ctx_id, list_id, buffer_id)

    def run_digest_loop(self):

        sub = nnpy.Socket(nnpy.AF_SP, nnpy.SUB)
        notifications_socket = self.controller.client.bm_mgmt_get_info().notifications_socket
        print "connecting to notification sub %s" % notifications_socket
        sub.connect(notifications_socket)
        sub.setsockopt(nnpy.SUB, nnpy.SUB_SUBSCRIBE, '')

        while True:
            msg = sub.recv()
            self.recv_msg_digest(msg)
Example #11
class L2Controller(object):
    def __init__(self, sw_name):
        #self.topo = Topology(db="topology.db")
        self.sw_name = sw_name
        self.thrift_port = 9090
        #self.thrift_port = self.topo.get_thrift_port(sw_name)
        #self.cpu_port =  self.topo.get_cpu_port_index(self.sw_name)
        self.cpu_port = 2
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.init()

    def init(self):
        self.controller.reset_state()
        #self.add_boadcast_groups()
        self.add_mirror()
        #self.fill_table_test()

    def add_mirror(self):
        if self.cpu_port:
            self.controller.mirroring_add(100, self.cpu_port)

    def read_register(self):
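        # register_read is called without an index here, so the whole
        # register array is returned; zero cells are filtered out before
        # printing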
        ns_recv = self.controller.register_read("ns_recv")
        na_recv = self.controller.register_read("na_recv")
        ns_filter = self.controller.register_read("ns_filter")
        na_filter = self.controller.register_read("na_filter")
        ns_recv_no_zero = []
        for x in ns_recv:
            if x != 0:
                ns_recv_no_zero.append(x)
        na_recv_no_zero = []
        for x in na_recv:
            if x != 0:
                na_recv_no_zero.append(x)
        ns_filter_no_zero = []
        for x in ns_filter:
            if x != 0:
                ns_filter_no_zero.append(x)
        na_filter_no_zero = []
        for x in na_filter:
            if x != 0:
                na_filter_no_zero.append(x)
        print "ns_recv: ", len(ns_recv_no_zero), ns_recv_no_zero
        print "ns_filter: ", len(ns_filter_no_zero), ns_filter_no_zero
        print "na_recv: ", len(na_recv_no_zero), na_recv_no_zero
        print "na_filter: ", len(na_filter_no_zero), na_filter_no_zero
Example #12
class LearningSwitchControllerApp(object):

    def __init__(self, switchName):
        self.topo = Topology(db="topology.db")
        self.switchName = switchName
        self.thrift_port = self.topo.get_thrift_port(switchName)
        self.cpu_port = self.topo.get_cpu_port_index(self.switchName)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.init()

    def init(self):
        self.controller.reset_state()
        self.add_mcast_grp()
        self.add_mirror()

    def add_mirror(self):
        if self.cpu_port:
            self.controller.mirroring_add(MIRROR_SESSION_ID, self.cpu_port)

    def add_mcast_grp(self):
        interfaces_to_port = self.topo[self.switchName]["interfaces_to_port"].copy()
        # filter lo and cpu port
        interfaces_to_port.pop('lo', None)
        interfaces_to_port.pop(self.topo.get_cpu_port_intf(self.switchName), None)

        mc_grp_id = 1
        rid = 0
        # add multicast group
        self.controller.mc_mgrp_create(mc_grp_id)
        port_list = interfaces_to_port.values()[:]
        # add multicast node group
        handle = self.controller.mc_node_create(rid, port_list)
        # associate with mc grp
        self.controller.mc_node_associate(mc_grp_id, handle)

    def learn(self, learningData):
        for macAddr, ingressPort in learningData:
            print("macAddr: %012X ingressPort: %s ", macAddr, ingressPort)
            self.controller.table_add("srcMacAddr", "NoAction", [str(macAddr)])
            self.controller.table_add("dstMacAddr", "forward", [
                                      str(macAddr)], [str(ingressPort)])

    def recv_msg_cpu(self, pkt):

        packet = Ether(str(pkt))
        if packet.type == L2_LEARN_ETHER_TYPE:
            cpu_header = CpuHeader(bytes(packet.payload))
            self.learn([(cpu_header.macAddr, cpu_header.ingressPort)])

    def run_cpu_port_loop(self):

        cpu_port_intf = str(self.topo.get_cpu_port_intf(
            self.switchName).replace("eth0", "eth1"))
        sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu)
Example #13
class FloodingController(object):

    def __init__(self, sw_name):

        self.topo = Topology(db="topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port =  self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)
        self.init()

    def init(self):

        self.controller.reset_state()
        self.fill_dmac_table()
        self.add_boadcast_groups()

    def fill_dmac_table(self):
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:01'], ['1'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:02'], ['2'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:03'], ['3'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:04'], ['4'])
        self.controller.table_set_default("dmac", "broadcast", [])

    def add_boadcast_groups(self):
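        # create one multicast group per ingress port, replicating to every
        # other port so that broadcast traffic is never sent back out of the
        # port it arrived on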

        interfaces_to_port = self.topo[self.sw_name]["interfaces_to_port"].copy()
        #filter lo and cpu port
        interfaces_to_port.pop('lo', None)
        interfaces_to_port.pop(self.topo.get_cpu_port_intf(self.sw_name), None)

        mc_grp_id = 1
        rid = 0
        for ingress_port in interfaces_to_port.values():

            port_list = interfaces_to_port.values()[:]
            del(port_list[port_list.index(ingress_port)])

            #add multicast group
            self.controller.mc_mgrp_create(mc_grp_id)

            #add multicast node group
            handle = self.controller.mc_node_create(rid, port_list)

            #associate with mc grp
            self.controller.mc_node_associate(mc_grp_id, handle)

            #fill broadcast table
            self.controller.table_add("select_mcast_grp", "set_mcast_grp", [str(ingress_port)], [str(mc_grp_id)])

            mc_grp_id +=1
            rid +=1
Example #14
class BlinkController:

    def __init__(self, topo_db, sw_name, ip_controller, port_controller, log_dir, \
        monitoring=True, routing_file=None):

        self.topo = Topology(db=topo_db)
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)
        self.controller.reset_state()
        self.log_dir = log_dir

        print 'connecting to ', ip_controller, port_controller
        # Socket used to communicate with the controller
        self.sock_controller = socket.socket(socket.AF_INET,
                                             socket.SOCK_STREAM)
        server_address = (ip_controller, port_controller)
        self.sock_controller.connect(server_address)
        print 'Connected!'

        # Send the switch name to the controller
        self.sock_controller.sendall(str(sw_name))

        self.make_logging()

        if monitoring:
            # Monitoring scheduler
            self.t_sched = sched_timer.RepeatingTimer(10, 0.5, self.scheduling)
            self.t_sched.start()

        self.mapping_dic = {}
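        # give every host and P4 switch a small integer id; these ids are
        # used below as match keys for the "send" table and as per-prefix
        # register indices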
        tmp = list(self.topo.get_hosts()) + list(self.topo.get_p4switches())
        self.mapping_dic = {k: v for v, k in enumerate(tmp)}
        self.log.info(str(self.mapping_dic))

        self.routing_file = routing_file
        print 'routing_file ', routing_file
        if self.routing_file is not None:
            json_data = open(self.routing_file)
            self.topo_routing = json.load(json_data)

    def make_logging(self):
        # Logger for the pipeline
        logger.setup_logger('p4_to_controller', self.log_dir+'/p4_to_controller_'+ \
            str(self.sw_name)+'.log', level=logging.INFO)
        self.log = logging.getLogger('p4_to_controller')

        # Logger for the sliding window
        logger.setup_logger('p4_to_controller_sw', self.log_dir+'/p4_to_controller_'+ \
            str(self.sw_name)+'_sw.log', level=logging.INFO)
        self.log_sw = logging.getLogger('p4_to_controller_sw')

        # Logger for the rerouting
        logger.setup_logger('p4_to_controller_rerouting', self.log_dir+'/p4_to_controller_'+ \
            str(self.sw_name)+'_rerouting.log', level=logging.INFO)
        self.log_rerouting = logging.getLogger('p4_to_controller_rerouting')

        # Logger for the Flow Selector
        logger.setup_logger('p4_to_controller_fs', self.log_dir+'/p4_to_controller_'+ \
            str(self.sw_name)+'_fs.log', level=logging.INFO)
        self.log_fs = logging.getLogger('p4_to_controller_fs')

    def scheduling(self):

        for host in list(self.topo.get_hosts()):
            prefix = self.topo.get_host_ip(host) + '/24'

            # Print log about the sliding window
            for id_prefix in [
                    self.mapping_dic[host] * 2, self.mapping_dic[host] * 2 + 1
            ]:

                with HiddenPrints():
                    sw_time = float(
                        self.controller.register_read('sw_time',
                                                      index=id_prefix)) / 1000.
                    sw_index = self.controller.register_read('sw_index',
                                                             index=id_prefix)
                    sw_sum = self.controller.register_read('sw_sum',
                                                           index=id_prefix)
                self.log_sw.info('sw_time\t' + host + '\t' + prefix + '\t' +
                                 str(id_prefix) + '\t' + str(sw_time))
                self.log_sw.info('sw_index\t' + host + '\t' + prefix + '\t' +
                                 str(id_prefix) + '\t' + str(sw_index))

                if sw_sum >= 32:
                    self.log_sw.info('sw_sum\t' + host + '\t' + prefix + '\t' +
                                     str(id_prefix) + '\t' + str(sw_sum) +
                                     '\tREROUTING')
                else:
                    self.log_sw.info('sw_sum\t' + host + '\t' + prefix + '\t' +
                                     str(id_prefix) + '\t' + str(sw_sum))

                sw = []
                tmp = 'sw ' + host + ' ' + prefix + ' ' + str(id_prefix) + '\t'
                for i in range(0, 10):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'sw', (id_prefix * 10) + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_sw.info(str(tmp))

        # Print log about rerouting
        for host in list(self.topo.get_hosts()):
            prefix = self.topo.get_host_ip(host) + '/24'

            for id_prefix in [
                    self.mapping_dic[host] * 2, self.mapping_dic[host] * 2 + 1
            ]:

                with HiddenPrints():
                    nh_avaibility_1 = self.controller.register_read(
                        'nh_avaibility_1', index=id_prefix)
                    nh_avaibility_2 = self.controller.register_read(
                        'nh_avaibility_2', index=id_prefix)
                    nh_avaibility_3 = self.controller.register_read(
                        'nh_avaibility_3', index=id_prefix)
                    nbflows_progressing_2 = self.controller.register_read(
                        'nbflows_progressing_2', index=id_prefix)
                    nbflows_progressing_3 = self.controller.register_read(
                        'nbflows_progressing_3', index=id_prefix)
                    rerouting_ts = self.controller.register_read(
                        'rerouting_ts', index=id_prefix)
                    threshold = self.controller.register_read(
                        'threshold_registers', index=id_prefix)

                self.log_rerouting.info('nh_avaibility\t'+host+'\t'+prefix+'\t'+ \
                str(id_prefix)+'\t'+str(nh_avaibility_1)+'\t'+ \
                str(nh_avaibility_2)+'\t'+str(nh_avaibility_3))
                self.log_rerouting.info('nbflows_progressing\t'+host+'\t'+prefix+'\t'+ \
                str(id_prefix)+'\t'+str(nbflows_progressing_2)+'\t'+ \
                str(nbflows_progressing_3))
                self.log_rerouting.info('rerouting_ts\t'+host+'\t'+prefix+'\t'+ \
                str(id_prefix)+'\t'+str(rerouting_ts))
                self.log_rerouting.info('threshold\t'+host+'\t'+prefix+'\t'+ \
                str(id_prefix)+'\t'+str(threshold))

                nexthop_str = ''
                nha = [nh_avaibility_1, nh_avaibility_2, nh_avaibility_3]
                i = 0
                if self.routing_file is not None:
                    bgp_type = 'customer' if id_prefix % 2 == 0 else 'customer_provider_peer'
                    if bgp_type not in self.topo_routing['switches'][
                            self.sw_name]['prefixes'][host]:
                        nexthop_str = 'NoPathAvailable'
                    else:
                        if len(self.topo_routing['switches'][self.sw_name]
                               ['prefixes'][host][bgp_type]) == 2:
                            self.topo_routing['switches'][self.sw_name][
                                'prefixes'][host][bgp_type].append(
                                    self.topo_routing['switches'][self.sw_name]
                                    ['prefixes'][host][bgp_type][-1])
                        for nexthop in self.topo_routing['switches'][
                                self.sw_name]['prefixes'][host][bgp_type]:
                            tmp = 'y' if nha[i] == 0 else 'n'
                            nexthop_str = nexthop_str + str(
                                nexthop) + '(' + tmp + ')\t'
                            i += 1
                        nexthop_str = nexthop_str[:-1]
                self.log_rerouting.info('nexthop\t'+host+'\t'+prefix+'\t'+ \
                str(id_prefix)+'\t'+str(nexthop_str))

        # Print log about the flow selector
        for host in list(self.topo.get_hosts()):
            prefix = self.topo.get_host_ip(host) + '/24'

            for id_prefix in [
                    self.mapping_dic[host] * 2, self.mapping_dic[host] * 2 + 1
            ]:

                sw = []
                tmp = 'fs_key ' + host + ' ' + prefix + ' ' + str(
                    id_prefix) + '\t'
                for i in range(0, 64):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'flowselector_key', 64 * id_prefix + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_fs.info(str(tmp))

                sw = []
                tmp = 'fs ' + host + ' ' + prefix + ' ' + str(id_prefix) + '\t'
                for i in range(0, 64):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'flowselector_ts', 64 * id_prefix + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_fs.info(str(tmp))

                sw = []
                tmp = 'fs_last_ret ' + host + ' ' + prefix + ' ' + str(
                    id_prefix) + '\t'
                for i in range(0, 64):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'flowselector_last_ret', 64 * id_prefix + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_fs.info(str(tmp))

                sw = []
                tmp = 'fs_last_ret_bin ' + host + ' ' + prefix + ' ' + str(
                    id_prefix) + '\t'
                for i in range(0, 64):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'flowselector_last_ret_bin',
                                64 * id_prefix + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_fs.info(str(tmp))

                sw = []
                tmp = 'fs_fwloops ' + host + ' ' + prefix + ' ' + str(
                    id_prefix) + '\t'
                for i in range(0, 64):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'flowselector_fwloops', 64 * id_prefix + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_fs.info(str(tmp))

                sw = []
                tmp = 'fs_correctness ' + host + ' ' + prefix + ' ' + str(
                    id_prefix) + '\t'
                for i in range(0, 64):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'flowselector_correctness',
                                64 * id_prefix + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_fs.info(str(tmp))

    def forwarding(self):
        p4switches = self.topo.get_p4switches()
        interfaces_to_node = p4switches[self.sw_name]['interfaces_to_node']

        for k, v in interfaces_to_node.items():

            try:
                dst_mac = self.topo.get_hosts()[v][self.sw_name]['mac']
            except KeyError:
                dst_mac = self.topo.get_p4switches()[v][self.sw_name]['mac']

            src_mac = p4switches[self.sw_name][v]['mac']
            outport = p4switches[self.sw_name]['interfaces_to_port'][
                p4switches[self.sw_name][v]['intf']]

            self.log.info('table add send set_nh ' + str(self.mapping_dic[v]) +
                          ' => ' + str(outport) + ' ' + str(src_mac) + ' ' +
                          str(dst_mac))
            self.controller.table_add(
                'send', 'set_nh', [str(self.mapping_dic[v])],
                [str(outport), str(src_mac),
                 str(dst_mac)])

    def run(self):

        sock_list = [self.sock_controller]
        controller_data = ''

        while True:
            inready, outready, excepready = select.select(sock_list, [], [])

            for sock in inready:
                if sock == self.sock_controller:
                    data_tmp = ''
                    toreturn = None

                    try:
                        data_tmp = sock.recv(100000000)
                    except socket.error, e:
                        err = e.args[0]
                        if not (err == errno.EAGAIN
                                or err == errno.EWOULDBLOCK):
                            print 'p4_to_controller: ', e
                            sock.close()
                            sock = None

                    if len(data_tmp) > 0:
                        controller_data += data_tmp

                        next_data = ''
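                        # keep only complete, newline-terminated commands;
                        # the trailing partial line (if any) is buffered for
                        # the next recv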
                        while len(controller_data
                                  ) > 0 and controller_data[-1] != '\n':
                            next_data = controller_data[-1] + next_data
                            controller_data = controller_data[:-1]

                        toreturn = controller_data
                        controller_data = next_data

                    if toreturn is not None:
                        for line in toreturn.split('\n'):
                            if line.startswith('table add '):
                                line = line.rstrip('\n').replace(
                                    'table add ', '')

                                fwtable_name = line.split(' ')[0]
                                action_name = line.split(' ')[1]

                                match_list = line.split(' => ')[0].split(
                                    ' ')[2:]
                                action_list = line.split(' => ')[1].split(' ')

                                print line
                                print fwtable_name, action_name, match_list, action_list

                                self.log.info(line)
                                self.controller.table_add(fwtable_name, action_name, \
                                    match_list, action_list)

                            if line.startswith('do_register_write'):
                                line = line.rstrip('\n')
                                linetab = line.split(' ')

                                register_name = linetab[1]
                                index = int(linetab[2])
                                value = int(linetab[3])

                                self.log.info(line)
                                self.controller.register_write(register_name, \
                                    index, value)

                            if line.startswith('reset_states'):
                                self.log.info('RESETTING_STATES')

                                # First stop the scheduler to avoid concurrent use
                                # of the Thrift server
                                self.t_sched.cancel()
                                while self.t_sched.running:  # Wait for the log printing to finish
                                    time.sleep(0.5)

                                time.sleep(1)

                                # Reset the state of the switch
                                self.controller.register_reset(
                                    'nh_avaibility_1')
                                self.controller.register_reset(
                                    'nh_avaibility_2')
                                self.controller.register_reset(
                                    'nh_avaibility_3')
                                self.controller.register_reset(
                                    'nbflows_progressing_2')
                                self.controller.register_reset(
                                    'nbflows_progressing_3')
                                self.controller.register_reset('rerouting_ts')
                                self.controller.register_reset(
                                    'timestamp_reference')
                                self.controller.register_reset('sw_time')
                                self.controller.register_reset('sw_index')
                                self.controller.register_reset('sw_sum')
                                self.controller.register_reset('sw')
                                self.controller.register_reset(
                                    'flowselector_key')
                                self.controller.register_reset(
                                    'flowselector_nep')
                                self.controller.register_reset(
                                    'flowselector_ts')
                                self.controller.register_reset(
                                    'flowselector_last_ret')
                                self.controller.register_reset(
                                    'flowselector_last_ret_bin')
                                self.controller.register_reset(
                                    'flowselector_correctness')
                                self.controller.register_reset(
                                    'flowselector_fwloops')

                                print self.sw_name, ' RESET.'

                                # Restart the scheduler
                                time.sleep(1)
                                self.t_sched.start()
Example #15
 def __init__(self, *args, **kwargs):
     self._switch_api = SimpleSwitchAPI(*args, **kwargs)
Example #16
class packetReceicer(threading.Thread):
    def __init__(self, sw_name, program):
        threading.Thread.__init__(self)
        if program == "f":
            self.topo = Topology(
                db="../p4src_flowsize/topology.db")  #set the topology
        elif program == "i":
            self.topo = Topology(
                db="../p4src_interval/topology.db")  #set the topology
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)
        self.flow = {}
        self.flag = True
        self.init()

    def init(self):
        self.add_mirror()
        self.counter = 1
        self.logs = open("../switch_log/" + self.sw_name + ".log", "w")
        self.logs_info = open("../switch_log/" + self.sw_name + "_info.log",
                              "w")
        self.logs_info.write("SWITCH[" + self.sw_name + "]\n")
        self.logs.close()
        self.logs_info.close()

    def add_mirror(self):
        if self.cpu_port:
            self.controller.mirroring_add(
                100, self.cpu_port)  # corresponds to mirror session 100 in the P4 code
            # is there any possibility of adding another mirror session / CPU port?

    def recv_msg_cpu(self, pkt):
        ## console output starts
        #print
        #print("["+self.sw_name+"] received packet number:"+str(self.counter))
        self.counter += 1
        cpu = CPU(str(pkt))
        #ls(cpu)

        ## console output ends
        type = (cpu.flags >> 2)
        if self.flag == True:
            logs = open("../switch_log/" + self.sw_name + ".log", "w")
            self.flag = False
            if type == 0:
                logs.write("flowsize information collecting\n")
            else:
                logs.write("interval information collecting\n")

            logs.close()

        self.gen_per_packet_log(cpu)
        self.collect_log(cpu)
        if (self.counter % 1000 == 0):
            self.gen_log()

    def gen_log(self):
        logs_info = open("../switch_log/" + self.sw_name + "_info.log", "a")
        logs_info.write("[flow number: " + str(len(self.flow)) + "]\n")
        change = lambda x: '.'.join(
            [str(x / (256**i) % 256) for i in range(3, -1, -1)])

        cnt = 0
        for i in self.flow:
            cnt += self.flow[i]["packnum"]
            tmp = i.split(":")
            tmp[0] = change(int(tmp[0]))
            tmp[1] = change(int(tmp[1]))
            tmp = " : ".join(tmp)
            logs_info.write("flow " + tmp + " ")

            logs_info.write(str(sorted(self.flow[i].items())))
            logs_info.write("\n")
        logs_info.write("[packet number sum:" + str(cnt) + "]\n\n")

        logs_info.close()

    def collect_log(self, cpu):
        flow_key = str(cpu.srcAddr) + ":" + str(cpu.dstAddr) + ":" + str(
            cpu.protocol) + ":" + str(cpu.srcPort) + ":" + str(cpu.dstPort)
        if self.flow.has_key(flow_key):
            self.flow[flow_key]["packnum"] += 1
            self.flow[flow_key][self.get_lev(cpu.delay)] += 1
        else:
            self.flow[flow_key]={"packnum":1,"0->1":0,"1->2":0,\
                "2->3":0,"3->4":0,"4->5":0,"5->6":0,"6->7":0\
                ,"7+":0}#"7->8":0,"8->9":0,"9+":0}
            self.flow[flow_key][self.get_lev(cpu.delay)] += 1

    def get_lev(self, delay):
        time_interval = 1000
        if delay < time_interval * 1:
            return "0->1"
        elif delay < time_interval * 2:
            return "1->2"
        elif delay < time_interval * 3:
            return "2->3"
        elif delay < time_interval * 4:
            return "3->4"
        elif delay < time_interval * 5:
            return "4->5"
        elif delay < time_interval * 6:
            return "5->6"
        elif delay < time_interval * 7:
            return "6->7"
        # elif delay<time_interval*8:
        #     return "7->8"
        # elif delay<time_interval*9:
        #     return "8->9"
        else:
            return "7+"

    def gen_per_packet_log(self, cpu):
        logs = open("../switch_log/" + self.sw_name + ".log", "a")
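        # helper that turns a 32-bit integer address into dotted-decimal notation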
        change = lambda x: '.'.join(
            [str(x / (256**i) % 256) for i in range(3, -1, -1)])

        srcAddr = change(cpu.srcAddr)
        dstAddr = change(cpu.dstAddr)
        tmp_delay = str(cpu.delay)
        delay = tmp_delay[-9:-6] + "s " + tmp_delay[-6:-3] + "ms " + tmp_delay[
            -3:] + "us"
        tmp_interval = str(cpu.interval)
        interval = tmp_interval[-9:-6] + "s " + tmp_interval[
            -6:-3] + "ms " + tmp_interval[-3:] + "us"
        sketch_fg = (cpu.flags >> 1) & 0x1
        has_SFH = cpu.flags & 0x1
        type = (cpu.flags >> 2) & 0x1

        logs.write('{"switch name":"' + self.sw_name + '",')
        logs.write('"packet number":"' + str(self.counter - 1) +
                   '","packet_info":{')
        logs.write('"srcAddr":"' + str(srcAddr) + '",')
        logs.write('"dstAddr":"' + str(dstAddr) + '",')
        logs.write('"protocol":"' + str(cpu.protocol) + '",')
        logs.write('"srcPort":"' + str(cpu.srcPort) + '",')
        logs.write('"dstPort":"' + str(cpu.dstPort) + '",')
        logs.write('"delay ":"' + delay + '",')
        logs.write('"interval":"' + interval)
        logs.write('"timestamp":' + str(time.time()))
        if type == 0:
            logs.write('",' + '"using sketch":"' + str(sketch_fg) + '",')
            logs.write('"bring SFH":' + str(bool(has_SFH)))
        else:
            logs.write('",' + '"using sketch":"' + str(sketch_fg) + '",')
            logs.write('"bring MIH":' + str(bool(has_SFH)))
        logs.write(" }}\n")
        logs.close()

    def run_cpu_port_loop(self):
        cpu_port_intf = str(
            self.topo.get_cpu_port_intf(self.sw_name).replace("eth0", "eth1"))
        # the CPU interface has two ports; two threads could be used to sniff them
        print(cpu_port_intf)
        print
        print(sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu))

    def run(self):
        self.run_cpu_port_loop()
Example #17
class L2Controller(object):

    def __init__(self, sw_name):

        self.topo = Topology(db="topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port =  self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.init()

    def init(self):

        self.controller.reset_state()
        self.add_boadcast_groups()
        self.add_mirror()
        #self.fill_table_test()

    def add_mirror(self):

        if self.cpu_port:
            self.controller.mirroring_add(100, self.cpu_port)

    def add_boadcast_groups(self):

        interfaces_to_port = self.topo[self.sw_name]["interfaces_to_port"].copy()
        #filter lo and cpu port
        interfaces_to_port.pop('lo', None)
        interfaces_to_port.pop(self.topo.get_cpu_port_intf(self.sw_name), None)

        mc_grp_id = 1
        rid = 0
        for ingress_port in interfaces_to_port.values():

            port_list = interfaces_to_port.values()[:]
            del(port_list[port_list.index(ingress_port)])

            #add multicast group
            self.controller.mc_mgrp_create(mc_grp_id)

            #add multicast node group
            handle = self.controller.mc_node_create(rid, port_list)

            #associate with mc grp
            self.controller.mc_node_associate(mc_grp_id, handle)

            #fill broadcast table
            self.controller.table_add("broadcast", "set_mcast_grp", [str(ingress_port)], [str(mc_grp_id)])

            mc_grp_id +=1
            rid +=1

    def fill_table_test(self):
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:01'], ['1'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:02'], ['2'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:03'], ['3'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:04'], ['4'])


    def learn(self, learning_data):

        for mac_addr, ingress_port in  learning_data:
            print "mac: %012X ingress_port: %s " % (mac_addr, ingress_port)
            self.controller.table_add("smac", "NoAction", [str(mac_addr)])
            self.controller.table_add("dmac", "forward", [str(mac_addr)], [str(ingress_port)])

    def unpack_digest(self, msg, num_samples):

        digest = []
        print len(msg), num_samples
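        # samples start after the 32-byte notification header; each 8-byte
        # sample packs a 48-bit MAC address (split as 32 + 16 bits) followed
        # by the 16-bit ingress port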
        starting_index = 32
        for sample in range(num_samples):
            mac0, mac1, ingress_port = struct.unpack(">LHH", msg[starting_index:starting_index+8])
            starting_index +=8
            mac_addr = (mac0 << 16) + mac1
            digest.append((mac_addr, ingress_port))

        return digest

    def recv_msg_digest(self, msg):

        topic, device_id, ctx_id, list_id, buffer_id, num = struct.unpack("<iQiiQi",
                                                                          msg[:32])
        digest = self.unpack_digest(msg, num)
        self.learn(digest)

        #Acknowledge digest
        self.controller.client.bm_learning_ack_buffer(ctx_id, list_id, buffer_id)


    def run_digest_loop(self):

        sub = nnpy.Socket(nnpy.AF_SP, nnpy.SUB)
        notifications_socket = self.controller.client.bm_mgmt_get_info().notifications_socket
        sub.connect(notifications_socket)
        sub.setsockopt(nnpy.SUB, nnpy.SUB_SUBSCRIBE, '')

        while True:
            msg = sub.recv()
            self.recv_msg_digest(msg)

    def recv_msg_cpu(self, pkt):

        packet = Ether(str(pkt))

        if packet.type == 0x1234:
            cpu_header = CpuHeader(packet.payload)
            self.learn([(cpu_header.macAddr, cpu_header.ingress_port)])

    def run_cpu_port_loop(self):

        cpu_port_intf = str(self.topo.get_cpu_port_intf(self.sw_name).replace("eth0", "eth1"))
        sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu)
Example #18
 def connect_to_switches(self):
     for p4switch in self.topo.get_p4switches():
         thrift_port = self.topo.get_thrift_port(p4switch)
         #print "p4switch:", p4switch, "thrift_port:", thrift_port
         self.controllers[p4switch] = SimpleSwitchAPI(thrift_port)
Example #19
 def connect_to_switches(self):
     for p4switch in self.topo.get_p4switches():
         thrift_port = self.topo.get_thrift_port(p4switch)
         self.controllers[p4switch] = SimpleSwitchAPI(thrift_port)
Example #20
class NCacheController(object):
    def __init__(self, sw_name, vtables_num=8):
        self.topo = Topology(db="../p4/topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(self.sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.custom_calcs = self.controller.get_custom_crc_calcs()
        self.sketch_register_num = len(self.custom_calcs)

        self.vtables = []
        self.vtables_num = vtables_num

        # create a pool of ids (as many as the total number of keys);
        # this pool is used to assign an index to each key, which in turn
        # indexes the cached-key counter and the validity register
        self.ids_pool = range(0, VTABLE_ENTRIES * VTABLE_SLOT_SIZE)

        # array of bitmap, which marks available slots per cache line
        # as 0 bits and occupied slots as 1 bits
        self.mem_pool = [0] * VTABLE_ENTRIES

        # number of memory slots used (useful for lfu eviction policy)
        self.used_mem_slots = 0

        # dictionary storing the value table index, bitmap and counter/validity
        # register index in the P4 switch that corresponds to each key
        self.key_map = {}

        self.setup()

        #self.out_of_band_test()

    def inform_server(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.connect(UNIX_CHANNEL)
        except socket.error as msg:
            #print('Error: Unable to contact server for cache operation completion')
            return

        sock.sendall(CACHE_INSERT_COMPLETE)

    # reports the value of counters for each cached key
    # (used only for debugging purposes)
    def report_counters(self):
        for key, val in self.key_map.items():
            vt_idx, bitmap, key_idx = val

            res = self.controller.counter_read(CACHED_KEYS_COUNTER, key_idx)
            if res != 0:
                print("[COUNTER] key = " + key + " [ " + str(res.packets) +
                      " ]")

    # periodically reset registers pertaining to query statistics module of the
    # P4 switch (count-min sketch registers, bloom filters and counters)
    def periodic_registers_reset(self):
        t = threading.Timer(STATISTICS_REFRESH_INTERVAL,
                            self.periodic_registers_reset)
        t.daemon = True
        t.start()

        # before resetting registers, check if the cache is utilized above a
        # threshold (e.g. 80%) and evict keys using the LFU policy if needed
        self.cache_lfu_eviction(threshold=0.8, sampling=0.2, to_remove=0.5)

        # reset bloom filter related registers
        for i in range(BLOOMF_REGISTERS_NUM):
            self.controller.register_reset(BLOOMF_REG_PREFIX + str(i + 1))

        # reset count min sketch related registers
        for i in range(SKETCH_REGISTERS_NUM):
            self.controller.register_reset(SKETCH_REG_PREFIX + str(i + 1))

        # reset counter register storing the query frequency of each cached item
        self.controller.counter_reset(CACHED_KEYS_COUNTER)

        print("[INFO]: Reset query statistics registers.")

    # the controller periodically checks whether the memory used has exceeded a given
    # threshold (e.g. 80%) and, if so, evicts keys according to an approximated
    # LFU policy inspired by Redis (https://redis.io/topics/lru-cache)
    def cache_lfu_eviction(self, threshold=0.8, sampling=0.2, to_remove=0.5):

        # if the threshold has not been surpassed then nothing to do
        if self.used_mem_slots <= (threshold * len(self.mem_pool) *
                                   VTABLE_SLOT_SIZE):
            return

        n_samples = int(sampling * len(self.key_map.items()))

        samples = random.sample(self.key_map.items(), n_samples)

        # read the counter for each sample and store them in an array
        evict_list = []
        for key, val in samples:
            x, y, cnt_idx = self.key_map[key]
            counter = self.controller.counter_read(CACHED_KEYS_COUNTER,
                                                   cnt_idx).packets
            evict_list.append((key, counter))

        # sort the array and pick the smallest K-th counters and evict their keys
        # (this could be achieved more optimally by using quickselect)
        import operator
        evict_list.sort(key=operator.itemgetter(1))

        for i in range(int(to_remove * n_samples)):
            curr = evict_list[i]
            self.evict(curr[0])

    def setup(self):
        if self.cpu_port:
            self.controller.mirroring_add(CONTROLLER_MIRROR_SESSION,
                                          self.cpu_port)

        # create custom hash functions for count min sketch and bloom filters
        self.set_crc_custom_hashes()
        self.create_hashes()

        # set a daemon to periodically reset registers
        self.periodic_registers_reset()

        # spawn new thread to serve incoming udp connections
        # (i.e hot reports from the switch)
        #udp_t = threading.Thread(target=self.hot_reports_loop)
        #udp_t.start()

    def set_crc_custom_hashes(self):
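        # program each custom crc32 unit on the switch with its own polynomial;
        # create_hashes() below builds matching Crc objects so the controller
        # can compute the same hashes as the data plane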
        i = 0
        for custom_crc32, width in sorted(self.custom_calcs.items()):
            self.controller.set_crc32_parameters(custom_crc32,
                                                 crc32_polinomials[i],
                                                 0xffffffff, 0xffffffff, True,
                                                 True)
            i += 1

    def create_hashes(self):
        self.hashes = []
        for i in range(self.sketch_register_num):
            self.hashes.append(
                Crc(32, crc32_polinomials[i], True, 0xffffffff, True,
                    0xffffffff))

    # set a static allocation scheme for l2 forwarding where the mac address of
    # each host is associated with the port connecting this host to the switch
    def set_forwarding_table(self):
        for host in self.topo.get_hosts_connected_to(self.sw_name):
            port = self.topo.node_to_node_port_num(self.sw_name, host)
            host_mac = self.topo.get_host_mac(host)
            self.controller.table_add("l2_forward", "set_egress_port",
                                      [str(host_mac)], [str(port)])

    def set_value_tables(self):
        for i in range(self.vtables_num):
            self.controller.table_add("vtable_" + str(i),
                                      "process_array_" + str(i), ['1'], [])

    # this function manages the mapping between slots in register arrays
    # and the cached items by implementing the First Fit algorithm described in
    # Memory Management section of 4.4.2 (netcache paper)
    def first_fit(self, key, value_size):

        n_slots = (value_size / (VTABLE_SLOT_SIZE + 1)) + 1
        if value_size <= 0:
            return None
        if key in self.key_map:
            return None

        for idx in range(len(self.mem_pool)):
            old_bitmap = self.mem_pool[idx]
            n_zeros = 8 - bin(old_bitmap).count("1")

            if n_zeros >= n_slots:
                cnt = 0
                bitmap = 0
                for i in reversed(range(8)):
                    if cnt >= n_slots:
                        break

                    if not self.bit_is_set(old_bitmap, i):
                        bitmap = bitmap | (1 << i)
                        cnt += 1

                # mark last n_slots 0 bits as 1 bits because we assigned
                # them to the new key and they are now allocated
                self.mem_pool[idx] = old_bitmap | bitmap

                self.used_mem_slots += bin(bitmap).count("1")

                return (idx, bitmap)

        return None

    # converts a list of 1s and 0s (represented as strings) into a bitmap
    # using bitwise operations (this intermediate representation
    # of a list of 1s and 0s is used to avoid low level bitwise logic inside
    # core implementation logic)
    def convert_to_bitmap(self, strlist, bitmap_len):
        bitmap = 0
        # supports only bitmaps with multiple of 8 bits size
        if bitmap_len % 8 != 0:
            return bitmap
        for i in strlist:
            bitmap = bitmap << 1
            bitmap = bitmap | int(i)

        return bitmap

    # this function checks whether the k-th bit of a given number is set
    def bit_is_set(self, n, k):
        if n & (1 << k):
            return True
        else:
            return False

    # given a key and its associated value, we update the lookup table on
    # the switch and we also update the value registers with the value
    # given as argument (stored in multiple slots)
    def insert(self, key, value, cont=True):
        # find where to put the value for given key
        mem_info = self.first_fit(key, len(value))

        # if key already exists or not space available then stop
        if mem_info == None:
            return

        vt_index, bitmap = mem_info

        # keep track of number of bytes of the value written so far
        cnt = 0

        # store the value of the key in the vtables of the switch while
        # incrementally storing a part of the value at each value table
        # if the corresponding bit of the bitmap is set
        for i in range(self.vtables_num):

            if self.bit_is_set(bitmap, self.vtables_num - i - 1):
                partial_val = value[cnt:cnt + VTABLE_SLOT_SIZE]
                self.controller.register_write(VTABLE_NAME_PREFIX + str(i),
                                               vt_index,
                                               self.str_to_int(partial_val))

                cnt += VTABLE_SLOT_SIZE

        # allocate an id from the pool to index the counter and validity register
        # (we pop from the end of the list because Python lists are optimized
        # for inserting and removing elements at the end)
        key_index = self.ids_pool.pop()

        # add the new key to the cache lookup table of the p4 switch
        self.controller.table_add(
            NETCACHE_LOOKUP_TABLE, "set_lookup_metadata",
            [str(self.str_to_int(key))],
            [str(bitmap), str(vt_index),
             str(key_index)])

        # mark cache entry for this key as valid
        self.controller.register_write("cache_status", key_index, 1)

        self.key_map[key] = vt_index, bitmap, key_index

        # inform the server about the successful cache insertion
        if cont:
            self.inform_server()

        print("Inserted key-value pair to cache: (" + key + "," + value + ")")

    # converts a string to a bytes representation and afterwards returns
    # its integer representation of width specified by argument int_width
    # (seems hacky due to restriction to use python2.7)
    def str_to_int(self, x, int_width=VTABLE_SLOT_SIZE):
        if len(x) > int_width:
            print "Error: Overflow while converting string to int"

        # add padding with 0x00 if input string size less than int_width
        bytearr = bytearray(int_width - len(x))
        bytearr.extend(x.encode('utf-8'))
        return struct.unpack(">Q", bytearr)[0]

    # given an arbitrary sized integer, the max width (in bits) of the integer
    # it returns the string representation of the number (also stripping it of
    # any '0x00' characters) (network byte order is assumed)
    def int_to_packed(self, int_val, max_width=128, word_size=32):
        num_words = max_width / word_size
        words = self.int_to_words(int_val, num_words, word_size)

        fmt = '>%dI' % (num_words)
        return struct.pack(fmt, *words).strip('\x00')

    # split up an arbitrary sized integer to words (needed to hack
    # around struct.pack limitation to convert to byte any integer
    # greater than 8 bytes)
    def int_to_words(self, int_val, num_words, word_size):
        max_int = 2**(word_size * num_words) - 1
        max_word_size = 2**word_size - 1
        words = []
        for _ in range(num_words):
            word = int_val & max_word_size
            words.append(int(word))
            int_val >>= word_size
        words.reverse()
        return words

    # update the value of the given key with the new value given as argument
    # (by allowing updates also to be done by the controller, the client is
    # also able to update keys with values bigger than the previous one)
    # in netcache paper this restriction is not resolved
    def update(self, key, value):
        # if key is not in cache then nothing to do
        if key not in self.key_map:
            return

        # update key-value pair by removing old pair and inserting new one
        self.evict(key)
        self.insert(key, value)

    # evict given key from the cache by deleting its associated entries in
    # action tables of the switch, by deallocating its memory space and by
    # marking the cache entry as valid once the deletion is completed
    def evict(self, key):

        if key not in self.key_map:
            return

        # delete entry from the lookup_table
        entry_handle = self.controller.get_handle_from_match(
            NETCACHE_LOOKUP_TABLE, [
                str(self.str_to_int(key)),
            ])

        if entry_handle is not None:
            self.controller.table_delete(NETCACHE_LOOKUP_TABLE, entry_handle)

        # delete mapping of key from controller's dictionary
        vt_idx, bitmap, key_idx = self.key_map[key]
        del self.key_map[key]

        # deallocate space from memory pool
        self.mem_pool[vt_idx] = self.mem_pool[vt_idx] ^ bitmap
        self.used_mem_slots = self.used_mem_slots - bin(bitmap).count("1")
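        # Illustrative note (not part of the original code): XOR-ing the key's
        # bitmap back into the pool clears exactly the bits this key occupied,
        # e.g. if mem_pool[vt_idx] == 0b1110 and bitmap == 0b0110, the pool
        # becomes 0b1000 (assuming the key's bits were set at insertion time).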

        # free the id used to index the validity/counter register and append
        # it back to the id pool of the controller
        self.ids_pool.append(key_idx)

        # mark cache entry as valid again (should be the last thing to do)
        self.controller.register_write("cache_status", key_idx, 1)

    # used for testing purposes and static population of cache
    def dummy_populate_vtables(self):
        test_values_l = [
            "alpha", "beta", "gamma", "delta", "epsilon", "zeta", "hita",
            "theta", "yiota", "kappa", "lambda", "meta"
        ]
        test_keys_l = [
            "one", "two", "three", "four", "five", "six", "seven", "eight",
            "nine", "ten", "eleven", "twelve"
        ]
        for i in range(11):
            self.insert(test_keys_l[i], test_values_l[i], False)

    # handles reports from the switch about hot keys, key-value updates or
    # deletions; this callback receives a packet, extracts its netcache header
    # and manipulates the cache based on the header's operation field
    def recv_switch_updates(self, pkt):
        print("Received message from switch")

        # extract netcache header information
        if pkt.haslayer(UDP):
            ncache_header = NetcacheHeader(pkt[UDP].payload)
        elif pkt.haslayer(TCP):
            ncache_header = NetcacheHeader(pkt[TCP].payload)
        else:
            # ignore packets that carry neither a UDP nor a TCP header
            return

        key = self.int_to_packed(ncache_header.key, max_width=128)
        value = self.int_to_packed(ncache_header.value, max_width=1024)

        op = ncache_header.op

        if op == NETCACHE_HOT_READ_QUERY:
            print("Received hot report for key = " + key)
            # if the reported "hot key" carries a null value (i.e. it does not
            # exist in the key-value store), do not insert it into the cache
            if not value:
                return

            self.insert(key, value)

        elif op == NETCACHE_DELETE_COMPLETE:
            print("Received query to delete key = " + key)
            self.evict(key)

        elif op == NETCACHE_UPDATE_COMPLETE:
            print("Received query to update key = " + key)
            self.update(key, value)

        else:
            print("Error: unrecognized operation field of netcache header")

    # sniff indefinitely on the interface connected to the P4 switch and, when
    # a valid netcache packet is captured, handle it via the recv_switch_updates
    # callback
    def hot_reports_loop(self):
        cpu_port_intf = str(self.topo.get_cpu_port_intf(self.sw_name))
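        # Note (assumption, not from the original code): the BPF filter below
        # presumes the netcache service runs on UDP/TCP port 50000; a plain
        # "port 50000" filter matches that port as either source or
        # destination, so traffic in both directions is captured.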
        sniff(iface=cpu_port_intf,
              prn=self.recv_switch_updates,
              filter="port 50000")

    def main(self):
        self.set_forwarding_table()
        self.set_value_tables()
        self.dummy_populate_vtables()
        self.hot_reports_loop()
class L2Controller(object):
    def __init__(self, sw_name):
        self.sw_name = sw_name
        self.thrift_port = 9090
        self.cpu_port = 2
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.init()

    def init(self):
        self.controller.reset_state()
        self.add_mirror()

    def add_mirror(self):
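        # Note (not part of the original code): mirroring_add binds mirror
        # session id 100 to the CPU port, so this id is assumed to match the
        # clone session id used by the P4 program when cloning packets to CPU.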
        if self.cpu_port:
            self.controller.mirroring_add(100, self.cpu_port)

    def fill_mac_query_table_test(self):
        self.controller.table_add("mac_forward", "modify_egress_spec",
                                  ['00:00:0a:00:00:01'], ['1'])
        self.controller.table_add("mac_forward", "modify_egress_spec",
                                  ['00:00:0a:00:00:02'], ['2'])
        self.controller.table_add("mac_forward", "modify_egress_spec",
                                  ['00:00:0a:00:00:03'], ['3'])
        self.controller.table_add("mac_forward", "modify_egress_spec",
                                  ['00:00:0a:00:00:04'], ['4'])

    def mac_learn(self, learning_data):
        for mac_addr, ingress_port in learning_data:
            print "mac: %012X ingress_port: %s " % (mac_addr, ingress_port)
            self.controller.table_add("mac_query", "set_mac_in",
                                      [str(mac_addr)])
            self.controller.table_add("mac_forward", "modify_egress_spec",
                                      [str(mac_addr)], [str(ingress_port)])

    def ipv6_learn(self, learning_data):
        for target_address, index in learning_data:
            print "target_address: %012X index: %s " % (target_address, index)
            self.controller.table_add("target_address_query",
                                      "set_target_address_in",
                                      [str(target_address)], [str(index)])

    def unpack_digest(self, msg, num_samples):
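        # Illustrative note (not part of the original code): each digest sample
        # is 8 bytes, big-endian: mac0 (4 bytes) and mac1 (2 bytes) carry the
        # high and low parts of the 48-bit MAC, followed by the 16-bit ingress
        # port. E.g. MAC 00:00:0a:00:00:01 seen on port 2 arrives as
        # mac0=0x00000a00, mac1=0x0001, ingress_port=0x0002.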
        digest = []
        print len(msg), num_samples
        starting_index = 32
        for sample in range(num_samples):
            mac0, mac1, ingress_port = struct.unpack(
                ">LHH", msg[starting_index:starting_index + 8])
            starting_index += 8
            mac_addr = (mac0 << 16) + mac1
            digest.append((mac_addr, ingress_port))

        return digest

    def recv_msg_digest(self, msg):
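        # Note (not part of the original code): the first 32 bytes of a bmv2
        # learning notification are unpacked as topic, device id, context id,
        # list id, buffer id and the number of samples; the samples themselves
        # start at byte offset 32, which is where unpack_digest begins reading.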
        topic, device_id, ctx_id, list_id, buffer_id, num = struct.unpack(
            "<iQiiQi", msg[:32])
        digest = self.unpack_digest(msg, num)
        print "digest:", digest, " digest len:", len(digest)
        self.mac_learn(digest)

        self.controller.client.bm_learning_ack_buffer(ctx_id, list_id,
                                                      buffer_id)

    def run_digest_loop(self):
        sub = nnpy.Socket(nnpy.AF_SP, nnpy.SUB)
        sub.connect('ipc:///tmp/bmv2-0-notifications.ipc')
        sub.setsockopt(nnpy.SUB, nnpy.SUB_SUBSCRIBE, '')
        print "run_digest_loop"

        while True:
            msg = sub.recv()
            #print "msg:",msg
            self.recv_msg_digest(msg)

    def recv_msg_cpu(self, pkt):
        packet = Ether(str(pkt))
        if packet.type == 0x1234:
            cpu_header = CpuHeader(packet.payload)
            self.mac_learn([(cpu_header.macAddr, cpu_header.ingress_port)])

    def run_cpu_port_loop(self):
        cpu_port_intf = "docker0"
        sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu)
class L2Controller(object):
    def __init__(self, sw_name):
        self.topo = Topology(db="topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.init()

    def init(self):
        self.controller.reset_state()
        self.add_broadcast_groups()
        self.add_mirror()

    def add_mirror(self):
        if self.cpu_port:
            self.controller.mirroring_add(100, self.cpu_port)

    def add_broadcast_groups(self):
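        # Illustrative note (not part of the original code): for a switch whose
        # data-plane ports are {1, 2, 3} (iterated in that order), the loop
        # below creates
        #   mc group 1 -> node with ports [2, 3]   (frames arriving on port 1)
        #   mc group 2 -> node with ports [1, 3]   (frames arriving on port 2)
        #   mc group 3 -> node with ports [1, 2]   (frames arriving on port 3)
        # and the "broadcast" table maps each ingress port to its group id, so
        # a broadcast frame is replicated to every port except the one it
        # arrived on.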
        interfaces_to_port = self.topo[
            self.sw_name]["interfaces_to_port"].copy()
        # filter lo and cpu port
        interfaces_to_port.pop('lo', None)
        interfaces_to_port.pop(self.topo.get_cpu_port_intf(self.sw_name), None)

        mc_grp_id = 1
        rid = 0
        for ingress_port in interfaces_to_port.values():

            port_list = list(interfaces_to_port.values())
            port_list.remove(ingress_port)

            #add multicast group
            self.controller.mc_mgrp_create(mc_grp_id)

            #add multicast node group
            handle = self.controller.mc_node_create(rid, port_list)

            #associate with mc grp
            self.controller.mc_node_associate(mc_grp_id, handle)

            #fill broadcast table
            self.controller.table_add("broadcast", "set_mcast_grp",
                                      [str(ingress_port)], [str(mc_grp_id)])

            mc_grp_id += 1
            rid += 1

    def learn_route(self, learning_data):
        for mac_addr, ingress_port in learning_data:
            print "mac: %012X ingress_port: %s " % (mac_addr, ingress_port)
            self.controller.table_add("smac", "NoAction", [str(mac_addr)])
            self.controller.table_add("dmac", "forward", [str(mac_addr)],
                                      [str(ingress_port)])

    def learn_connection(self, srcA, dstA, srcP, dstP):
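        # Illustrative note (not part of the original code): the match key built
        # below is a 96-bit value laid out as
        #   srcAddr (32 bits) | dstAddr (32 bits) | srcPort (16 bits) | dstPort (16 bits)
        # and a second entry with source and destination swapped is installed,
        # so both directions of the TCP connection match the "tcp_forward" table.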
        print("========== UPDATING CONNECTION ==========")
        connection = srcA
        connection = connection << 32
        connection = connection | dstA
        connection = connection << 16
        connection = connection | srcP
        connection = connection << 16
        connection = connection | dstP
        self.controller.table_add("tcp_forward", "NoAction", [str(connection)],
                                  [])

        connection = dstA
        connection = connection << 32
        connection = connection | srcA
        connection = connection << 16
        connection = connection | dstP
        connection = connection << 16
        connection = connection | srcP
        self.controller.table_add("tcp_forward", "NoAction", [str(connection)],
                                  [])

        print("========== UPDATE FINISHED ==========")

    def recv_msg_cpu(self, pkt):
        packet = Ether(str(pkt))

        if packet.type == 0x1234:
            learning = CpuRoute(packet.payload)
            print("got a packet of type route")
            self.learn_route([(learning.macAddr, learning.ingress_port)])
        if packet.type == 0xF00D:
            learning = CpuCookie(packet.payload)
            print("got a packet of type cookie")
            self.learn_connection(learning.srcAddr, learning.dstAddr,
                                  learning.srcPort, learning.dstPort)

    def run_cpu_port_loop(self):
        cpu_port_intf = str(
            self.topo.get_cpu_port_intf(self.sw_name).replace("eth0", "eth1"))
        sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu)