Example #1
class NCacheController(object):
    def __init__(self, sw_name):
        self.topo = Topology(db="./topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(self.sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.custom_calcs = self.controller.get_custom_crc_calcs()
        self.sketch_register_num = len(self.custom_calcs)

        self.setup()

    def setup(self):
        if self.cpu_port:
            self.controller.mirroring_add(CONTROLLER_MIRROR_SESSION,
                                          self.cpu_port)

    # set a static allocation scheme for l2 forwarding where the mac address of
    # each host is associated with the port connecting this host to the switch
    def set_forwarding_table(self):
        for host in self.topo.get_hosts_connected_to(self.sw_name):
            port = self.topo.node_to_node_port_num(self.sw_name, host)
            host_mac = self.topo.get_host_mac(host)
            print "mac: %s -> port: %s" % (host_mac, port)
            self.controller.table_add("l2_forward", "set_egress_port",
                                      [str(host_mac)], [str(port)])

    def main(self):
        self.set_forwarding_table()
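
A minimal launch sketch for this controller; the command-line handling and default switch name are assumptions, and the p4utils imports used by the class (Topology, SimpleSwitchAPI) are taken to be in scope:

if __name__ == "__main__":
    import sys
    # assumed invocation: python controller.py <switch_name>
    sw_name = sys.argv[1] if len(sys.argv) > 1 else "s1"
    NCacheController(sw_name).main()
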
class LearningSwitchControllerApp(object):

    def __init__(self, switchName):
        self.topo = Topology(db="topology.db")
        self.switchName = switchName
        self.thrift_port = self.topo.get_thrift_port(switchName)
        self.cpu_port = self.topo.get_cpu_port_index(self.switchName)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.init()

    def init(self):
        self.controller.reset_state()
        self.add_mcast_grp()
        self.add_mirror()

    def add_mirror(self):
        if self.cpu_port:
            self.controller.mirroring_add(MIRROR_SESSION_ID, self.cpu_port)

    def add_mcast_grp(self):
        interfaces_to_port = self.topo[self.switchName]["interfaces_to_port"].copy()
        # filter lo and cpu port
        interfaces_to_port.pop('lo', None)
        interfaces_to_port.pop(self.topo.get_cpu_port_intf(self.switchName), None)

        mc_grp_id = 1
        rid = 0
        # add multicast group
        self.controller.mc_mgrp_create(mc_grp_id)
        port_list = interfaces_to_port.values()[:]
        # add multicast node group
        handle = self.controller.mc_node_create(rid, port_list)
        # associate with mc grp
        self.controller.mc_node_associate(mc_grp_id, handle)

    def learn(self, learningData):
        for macAddr, ingressPort in learningData:
            print("macAddr: %012X ingressPort: %s ", macAddr, ingressPort)
            self.controller.table_add("srcMacAddr", "NoAction", [str(macAddr)])
            self.controller.table_add("dstMacAddr", "forward", [
                                      str(macAddr)], [str(ingressPort)])

    def recv_msg_cpu(self, pkt):

        packet = Ether(str(pkt))
        if packet.type == L2_LEARN_ETHER_TYPE:
            cpu_header = CpuHeader(bytes(packet.payload))
            self.learn([(cpu_header.macAddr, cpu_header.ingressPort)])

    def run_cpu_port_loop(self):

        cpu_port_intf = str(self.topo.get_cpu_port_intf(
            self.switchName).replace("eth0", "eth1"))
        sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu)
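
A possible entry point for this learning switch, assuming the script is started with the switch name as its only argument (not shown in the original excerpt):

if __name__ == "__main__":
    import sys
    app = LearningSwitchControllerApp(sys.argv[1] if len(sys.argv) > 1 else "s1")
    # blocks forever, learning MAC addresses from packets mirrored to the CPU port
    app.run_cpu_port_loop()
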
class L2Controller(object):
    def __init__(self, sw_name):
        #self.topo = Topology(db="topology.db")
        self.sw_name = sw_name
        self.thrift_port = 9090
        #self.thrift_port = self.topo.get_thrift_port(sw_name)
        #self.cpu_port =  self.topo.get_cpu_port_index(self.sw_name)
        self.cpu_port = 2
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.init()

    def init(self):
        self.controller.reset_state()
        #self.add_boadcast_groups()
        self.add_mirror()
        #self.fill_table_test()

    def add_mirror(self):
        if self.cpu_port:
            self.controller.mirroring_add(100, self.cpu_port)

    def read_register(self):
        ns_recv = self.controller.register_read("ns_recv")
        na_recv = self.controller.register_read("na_recv")
        ns_filter = self.controller.register_read("ns_filter")
        na_filter = self.controller.register_read("na_filter")
        ns_recv_no_zero = [x for x in ns_recv if x != 0]
        na_recv_no_zero = [x for x in na_recv if x != 0]
        ns_filter_no_zero = [x for x in ns_filter if x != 0]
        na_filter_no_zero = [x for x in na_filter if x != 0]
        print "ns_recv: ", len(ns_recv_no_zero), ns_recv_no_zero
        print "ns_filter: ", len(ns_filter_no_zero), ns_filter_no_zero
        print "na_recv: ", len(na_recv_no_zero), na_recv_no_zero
        print "na_filter: ", len(na_filter_no_zero), na_filter_no_zero
Example #4
class L2Controller(object):
    def __init__(self, sw_name):
        self.sw_name = sw_name
        self.thrift_port = 9090
        self.cpu_port = 2
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.init()

    def init(self):
        self.controller.reset_state()
        self.add_mirror()

    def add_mirror(self):
        if self.cpu_port:
            self.controller.mirroring_add(100, self.cpu_port)

    def fill_mac_query_table_test(self):
        self.controller.table_add("mac_forward", "modify_egress_spec",
                                  ['00:00:0a:00:00:01'], ['1'])
        self.controller.table_add("mac_forward", "modify_egress_spec",
                                  ['00:00:0a:00:00:02'], ['2'])
        self.controller.table_add("mac_forward", "modify_egress_spec",
                                  ['00:00:0a:00:00:03'], ['3'])
        self.controller.table_add("mac_forward", "modify_egress_spec",
                                  ['00:00:0a:00:00:04'], ['4'])

    def mac_learn(self, learning_data):
        for mac_addr, ingress_port in learning_data:
            print "mac: %012X ingress_port: %s " % (mac_addr, ingress_port)
            self.controller.table_add("mac_query", "set_mac_in",
                                      [str(mac_addr)])
            self.controller.table_add("mac_forward", "modify_egress_spec",
                                      [str(mac_addr)], [str(ingress_port)])

    def ipv6_learn(self, learning_data):
        for target_address, index in learning_data:
            print "target_address: %012X index: %s " % (target_address, index)
            self.controller.table_add("target_address_query",
                                      "set_target_address_in",
                                      [str(target_address)], [str(index)])

    def unpack_digest(self, msg, num_samples):
        digest = []
        print len(msg), num_samples
        starting_index = 32
        for sample in range(num_samples):
            mac0, mac1, ingress_port = struct.unpack(
                ">LHH", msg[starting_index:starting_index + 8])
            starting_index += 8
            mac_addr = (mac0 << 16) + mac1
            digest.append((mac_addr, ingress_port))

        return digest

    def recv_msg_digest(self, msg):
        topic, device_id, ctx_id, list_id, buffer_id, num = struct.unpack(
            "<iQiiQi", msg[:32])
        digest = self.unpack_digest(msg, num)
        print "digest:", digest, " digest len:", len(digest)
        self.mac_learn(digest)

        self.controller.client.bm_learning_ack_buffer(ctx_id, list_id,
                                                      buffer_id)

    def run_digest_loop(self):
        sub = nnpy.Socket(nnpy.AF_SP, nnpy.SUB)
        sub.connect('ipc:///tmp/bmv2-0-notifications.ipc')
        sub.setsockopt(nnpy.SUB, nnpy.SUB_SUBSCRIBE, '')
        print "run_digest_loop"

        while True:
            msg = sub.recv()
            #print "msg:",msg
            self.recv_msg_digest(msg)

    def recv_msg_cpu(self, pkt):
        packet = Ether(str(pkt))
        if packet.type == 0x1234:
            cpu_header = CpuHeader(packet.payload)
            self.mac_learn([(cpu_header.macAddr, cpu_header.ingress_port)])

    def run_cpu_port_loop(self):
        cpu_port_intf = "docker0"
        sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu)
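
recv_msg_digest above assumes a fixed digest layout: a 32-byte header packed as "<iQiiQi" followed by one 8-byte ">LHH" sample per learned MAC. A small helper, sketched here only to illustrate that assumed layout (e.g. for unit-testing unpack_digest):

import struct

def build_fake_digest(samples):
    # header fields: topic, device_id, ctx_id, list_id, buffer_id, num_samples
    msg = struct.pack("<iQiiQi", 0, 0, 0, 0, 0, len(samples))
    for mac_addr, ingress_port in samples:
        # split the MAC into a 32-bit high part and a 16-bit low part, as unpack_digest expects
        msg += struct.pack(">LHH", mac_addr >> 16, mac_addr & 0xFFFF, ingress_port)
    return msg

Passing the result to unpack_digest(msg, len(samples)) should reproduce the original (mac_addr, ingress_port) pairs.
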
Example #5
class L2Controller(object):

    def __init__(self, sw_name):

        self.topo = Topology(db="topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port =  self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.init()

    def init(self):

        self.controller.reset_state()
        self.add_broadcast_groups()
        self.add_mirror()
        #self.fill_table_test()

    def add_mirror(self):

        if self.cpu_port:
            self.controller.mirroring_add(100, self.cpu_port)

    def add_broadcast_groups(self):

        interfaces_to_port = self.topo[self.sw_name]["interfaces_to_port"].copy()
        #filter lo and cpu port
        interfaces_to_port.pop('lo', None)
        interfaces_to_port.pop(self.topo.get_cpu_port_intf(self.sw_name), None)

        mc_grp_id = 1
        rid = 0
        for ingress_port in interfaces_to_port.values():

            port_list = interfaces_to_port.values()[:]
            del(port_list[port_list.index(ingress_port)])

            #add multicast group
            self.controller.mc_mgrp_create(mc_grp_id)

            #add multicast node group
            handle = self.controller.mc_node_create(rid, port_list)

            #associate with mc grp
            self.controller.mc_node_associate(mc_grp_id, handle)

            #fill broadcast table
            self.controller.table_add("broadcast", "set_mcast_grp", [str(ingress_port)], [str(mc_grp_id)])

            mc_grp_id +=1
            rid +=1

    def fill_table_test(self):
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:01'], ['1'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:02'], ['2'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:03'], ['3'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:04'], ['4'])


    def learn(self, learning_data):

        for mac_addr, ingress_port in  learning_data:
            print "mac: %012X ingress_port: %s " % (mac_addr, ingress_port)
            self.controller.table_add("smac", "NoAction", [str(mac_addr)])
            self.controller.table_add("dmac", "forward", [str(mac_addr)], [str(ingress_port)])

    def unpack_digest(self, msg, num_samples):

        digest = []
        print len(msg), num_samples
        starting_index = 32
        for sample in range(num_samples):
            mac0, mac1, ingress_port = struct.unpack(">LHH", msg[starting_index:starting_index+8])
            starting_index +=8
            mac_addr = (mac0 << 16) + mac1
            digest.append((mac_addr, ingress_port))

        return digest

    def recv_msg_digest(self, msg):

        topic, device_id, ctx_id, list_id, buffer_id, num = struct.unpack("<iQiiQi",
                                                                          msg[:32])
        digest = self.unpack_digest(msg, num)
        self.learn(digest)

        #Acknowledge digest
        self.controller.client.bm_learning_ack_buffer(ctx_id, list_id, buffer_id)


    def run_digest_loop(self):

        sub = nnpy.Socket(nnpy.AF_SP, nnpy.SUB)
        notifications_socket = self.controller.client.bm_mgmt_get_info().notifications_socket
        sub.connect(notifications_socket)
        sub.setsockopt(nnpy.SUB, nnpy.SUB_SUBSCRIBE, '')

        while True:
            msg = sub.recv()
            self.recv_msg_digest(msg)

    def recv_msg_cpu(self, pkt):

        packet = Ether(str(pkt))

        if packet.type == 0x1234:
            cpu_header = CpuHeader(packet.payload)
            self.learn([(cpu_header.macAddr, cpu_header.ingress_port)])

    def run_cpu_port_loop(self):

        cpu_port_intf = str(self.topo.get_cpu_port_intf(self.sw_name).replace("eth0", "eth1"))
        sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu)
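
The CpuHeader class used by recv_msg_cpu is not defined in this excerpt; a plausible scapy definition, with field widths assumed to match a 48-bit MAC and a 16-bit ingress port in the P4 cpu header, might look like:

from scapy.all import Packet, BitField

class CpuHeader(Packet):
    name = 'CpuPacket'
    # assumed layout: 48-bit source MAC followed by a 16-bit ingress port
    fields_desc = [BitField('macAddr', 0, 48), BitField('ingress_port', 0, 16)]
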
Example #6
class NCacheController(object):
    def __init__(self, sw_name, vtables_num=8):
        self.topo = Topology(db="../p4/topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(self.sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.custom_calcs = self.controller.get_custom_crc_calcs()
        self.sketch_register_num = len(self.custom_calcs)

        self.vtables = []
        self.vtables_num = vtables_num

        # create a pool of ids (one per possible key slot); an id is assigned
        # to each cached key and is used to index the cached-key counter and
        # the validity register
        self.ids_pool = range(0, VTABLE_ENTRIES * VTABLE_SLOT_SIZE)

        # array of bitmap, which marks available slots per cache line
        # as 0 bits and occupied slots as 1 bits
        self.mem_pool = [0] * VTABLE_ENTRIES

        # number of memory slots used (useful for lfu eviction policy)
        self.used_mem_slots = 0

        # dictionary storing the value table index, bitmap and counter/validity
        # register index in the P4 switch that corresponds to each key
        self.key_map = {}

        self.setup()

        #self.out_of_band_test()

    def inform_server(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.connect(UNIX_CHANNEL)
        except socket.error as msg:
            #print('Error: Unable to contact server for cache operation completion')
            return

        sock.sendall(CACHE_INSERT_COMPLETE)

    # reports the value of counters for each cached key
    # (used only for debugging purposes)
    def report_counters(self):
        for key, val in self.key_map.items():
            vt_idx, bitmap, key_idx = val

            res = self.controller.counter_read(CACHED_KEYS_COUNTER, key_idx)
            if res.packets != 0:
                print("[COUNTER] key = " + key + " [ " + str(res.packets) +
                      " ]")

    # periodically reset registers pertaining to query statistics module of the
    # P4 switch (count-min sketch registers, bloom filters and counters)
    def periodic_registers_reset(self):
        t = threading.Timer(STATISTICS_REFRESH_INTERVAL,
                            self.periodic_registers_reset)
        t.daemon = True
        t.start()

        # before resetting registers, check if the cache is utilized above a
        # threshold (e.g. 80%) and evict keys using the lfu policy if needed
        self.cache_lfu_eviction(threshold=0.8, sampling=0.2, to_remove=0.5)

        # reset bloom filter related registers
        for i in range(BLOOMF_REGISTERS_NUM):
            self.controller.register_reset(BLOOMF_REG_PREFIX + str(i + 1))

        # reset count min sketch related registers
        for i in range(SKETCH_REGISTERS_NUM):
            self.controller.register_reset(SKETCH_REG_PREFIX + str(i + 1))

        # reset counter register storing the query frequency of each cached item
        self.controller.counter_reset(CACHED_KEYS_COUNTER)

        print("[INFO]: Reset query statistics registers.")

    # the controller periodically checks whether memory usage has exceeded a given
    # threshold (e.g. 80%) and, if so, evicts keys according to an approximate
    # LFU policy inspired by Redis (https://redis.io/topics/lru-cache)
    def cache_lfu_eviction(self, threshold=0.8, sampling=0.2, to_remove=0.5):

        # if the threshold has not been surpassed then nothing to do
        if self.used_mem_slots <= (threshold * len(self.mem_pool) *
                                   VTABLE_SLOT_SIZE):
            return

        n_samples = int(sampling * len(self.key_map.items()))

        samples = random.sample(self.key_map.items(), n_samples)

        # read the counter for each sample and store them in an array
        evict_list = []
        for key, val in samples:
            x, y, cnt_idx = self.key_map[key]
            counter = self.controller.counter_read(CACHED_KEYS_COUNTER,
                                                   cnt_idx).packets
            evict_list.append((key, counter))

        # sort the array, pick the keys with the K smallest counters and evict them
        # (quickselect would do this more efficiently)
        import operator
        evict_list.sort(key=operator.itemgetter(1))

        for i in range(int(to_remove * n_samples)):
            curr = evict_list[i]
            self.evict(curr[0])

    def setup(self):
        if self.cpu_port:
            self.controller.mirroring_add(CONTROLLER_MIRROR_SESSION,
                                          self.cpu_port)

        # create custom hash functions for count min sketch and bloom filters
        self.set_crc_custom_hashes()
        self.create_hashes()

        # set a daemon to periodically reset registers
        self.periodic_registers_reset()

        # spawn new thread to serve incoming udp connections
        # (i.e hot reports from the switch)
        #udp_t = threading.Thread(target=self.hot_reports_loop)
        #udp_t.start()

    def set_crc_custom_hashes(self):
        i = 0
        for custom_crc32, width in sorted(self.custom_calcs.items()):
            self.controller.set_crc32_parameters(custom_crc32,
                                                 crc32_polinomials[i],
                                                 0xffffffff, 0xffffffff, True,
                                                 True)
            i += 1

    def create_hashes(self):
        self.hashes = []
        for i in range(self.sketch_register_num):
            self.hashes.append(
                Crc(32, crc32_polinomials[i], True, 0xffffffff, True,
                    0xffffffff))

    # set a static allocation scheme for l2 forwarding where the mac address of
    # each host is associated with the port connecting this host to the switch
    def set_forwarding_table(self):
        for host in self.topo.get_hosts_connected_to(self.sw_name):
            port = self.topo.node_to_node_port_num(self.sw_name, host)
            host_mac = self.topo.get_host_mac(host)
            self.controller.table_add("l2_forward", "set_egress_port",
                                      [str(host_mac)], [str(port)])

    def set_value_tables(self):
        for i in range(self.vtables_num):
            self.controller.table_add("vtable_" + str(i),
                                      "process_array_" + str(i), ['1'], [])

    # this function manages the mapping between slots in register arrays
    # and the cached items by implementing the First Fit algorithm described in
    # the Memory Management part of section 4.4.2 of the netcache paper
    def first_fit(self, key, value_size):

        if value_size <= 0:
            return None
        if key in self.key_map:
            return None

        # number of value slots needed to hold the value (ceiling division)
        n_slots = (value_size + VTABLE_SLOT_SIZE - 1) / VTABLE_SLOT_SIZE

        for idx in range(len(self.mem_pool)):
            old_bitmap = self.mem_pool[idx]
            n_zeros = 8 - bin(old_bitmap).count("1")

            if n_zeros >= n_slots:
                cnt = 0
                bitmap = 0
                for i in reversed(range(8)):
                    if cnt >= n_slots:
                        break

                    if not self.bit_is_set(old_bitmap, i):
                        bitmap = bitmap | (1 << i)
                        cnt += 1

                # mark last n_slots 0 bits as 1 bits because we assigned
                # them to the new key and they are now allocated
                self.mem_pool[idx] = old_bitmap | bitmap

                self.used_mem_slots += bin(bitmap).count("1")

                return (idx, bitmap)

        return None

    # converts a list of 1s and 0s (represented as strings) to a bitmap using
    # bitwise operations (this intermediate representation is used to avoid
    # low-level bitwise logic inside the core implementation)
    def convert_to_bitmap(self, strlist, bitmap_len):
        bitmap = 0
        # supports only bitmaps with multiple of 8 bits size
        if bitmap_len % 8 != 0:
            return bitmap
        for i in strlist:
            bitmap = bitmap << 1
            bitmap = bitmap | int(i)

        return bitmap

    # this function checks whether the k-th bit of a given number is set
    def bit_is_set(self, n, k):
        if n & (1 << k):
            return True
        else:
            return False

    # given a key and its associated value, we update the lookup table on
    # the switch and we also update the value registers with the value
    # given as argument (stored in multiple slots)
    def insert(self, key, value, cont=True):
        # find where to put the value for given key
        mem_info = self.first_fit(key, len(value))

        # if key already exists or not space available then stop
        if mem_info == None:
            return

        vt_index, bitmap = mem_info

        # keep track of number of bytes of the value written so far
        cnt = 0

        # store the value of the key in the vtables of the switch, writing a
        # part of the value to each value table whose corresponding bit in
        # the bitmap is set
        for i in range(self.vtables_num):

            if self.bit_is_set(bitmap, self.vtables_num - i - 1):
                partial_val = value[cnt:cnt + VTABLE_SLOT_SIZE]
                self.controller.register_write(VTABLE_NAME_PREFIX + str(i),
                                               vt_index,
                                               self.str_to_int(partial_val))

                cnt += VTABLE_SLOT_SIZE

        # allocate an id from the pool to index the counter and validity register
        # (we take the last element because python lists are optimized for
        # inserting and removing elements at the end)
        key_index = self.ids_pool.pop()

        # add the new key to the cache lookup table of the p4 switch
        self.controller.table_add(
            NETCACHE_LOOKUP_TABLE, "set_lookup_metadata",
            [str(self.str_to_int(key))],
            [str(bitmap), str(vt_index),
             str(key_index)])

        # mark cache entry for this key as valid
        self.controller.register_write("cache_status", key_index, 1)

        self.key_map[key] = vt_index, bitmap, key_index

        # inform the server about the successful cache insertion
        if cont:
            self.inform_server()

        print("Inserted key-value pair to cache: (" + key + "," + value + ")")

    # converts a string to a bytes representation and afterwards returns
    # its integer representation of width specified by argument int_width
    # (seems hacky due to restriction to use python2.7)
    def str_to_int(self, x, int_width=VTABLE_SLOT_SIZE):
        if len(x) > int_width:
            print "Error: Overflow while converting string to int"

        # add padding with 0x00 if input string size less than int_width
        bytearr = bytearray(int_width - len(x))
        bytearr.extend(x.encode('utf-8'))
        return struct.unpack(">Q", bytearr)[0]

    # given an arbitrarily sized integer and its max width in bits, returns the
    # string representation of the number, stripped of any '\x00' bytes
    # (network byte order is assumed)
    def int_to_packed(self, int_val, max_width=128, word_size=32):
        num_words = max_width / word_size
        words = self.int_to_words(int_val, num_words, word_size)

        fmt = '>%dI' % (num_words)
        return struct.pack(fmt, *words).strip('\x00')

    # split up an arbitrary sized integer to words (needed to hack
    # around struct.pack limitation to convert to byte any integer
    # greater than 8 bytes)
    def int_to_words(self, int_val, num_words, word_size):
        max_int = 2**(word_size * num_words) - 1
        max_word_size = 2**word_size - 1
        words = []
        for _ in range(num_words):
            word = int_val & max_word_size
            words.append(int(word))
            int_val >>= word_size
        words.reverse()
        return words

    # update the value of the given key with the new value given as argument
    # (because updates can also be done by the controller, the client is able
    # to update a key with a value bigger than the previous one; the netcache
    # paper does not resolve this restriction)
    def update(self, key, value):
        # if key is not in cache then nothing to do
        if key not in self.key_map:
            return

        # update key-value pair by removing old pair and inserting new one
        self.evict(key)
        self.insert(key, value)

    # evict given key from the cache by deleting its associated entries in
    # action tables of the switch, by deallocating its memory space and by
    # marking the cache entry as valid once the deletion is completed
    def evict(self, key):

        if key not in self.key_map:
            return

        # delete entry from the lookup_table
        entry_handle = self.controller.get_handle_from_match(
            NETCACHE_LOOKUP_TABLE, [
                str(self.str_to_int(key)),
            ])

        if entry_handle is not None:
            self.controller.table_delete(NETCACHE_LOOKUP_TABLE, entry_handle)

        # delete mapping of key from controller's dictionary
        vt_idx, bitmap, key_idx = self.key_map[key]
        del self.key_map[key]

        # deallocate space from memory pool
        self.mem_pool[vt_idx] = self.mem_pool[vt_idx] ^ bitmap
        self.used_mem_slots = self.used_mem_slots - bin(bitmap).count("1")

        # free the id used to index the validity/counter register and append
        # it back to the id pool of the controller
        self.ids_pool.append(key_idx)

        # mark cache entry as valid again (should be the last thing to do)
        self.controller.register_write("cache_status", key_idx, 1)

    # used for testing purposes and static population of cache
    def dummy_populate_vtables(self):
        test_values_l = [
            "alpha", "beta", "gamma", "delta", "epsilon", "zeta", "hita",
            "theta", "yiota", "kappa", "lambda", "meta"
        ]
        test_keys_l = [
            "one", "two", "three", "four", "five", "six", "seven", "eight",
            "nine", "ten", "eleven", "twelve"
        ]
        cnt = 0
        for i in range(11):
            self.insert(test_keys_l[i], test_values_l[i], False)

    # handling reports from the switch corresponding to hot keys, updates to
    # key-value pairs or deletions - this function receives a packet, extracts
    # its netcache header and manipulates cache based on the operation field
    # of the netcache header (callback function)
    def recv_switch_updates(self, pkt):
        print("Received message from switch")

        # extract netcache header information
        if pkt.haslayer(UDP):
            ncache_header = NetcacheHeader(pkt[UDP].payload)
        elif pkt.haslayer(TCP):
            ncache_header = NetcacheHeader(pkt[TCP].payload)

        key = self.int_to_packed(ncache_header.key, max_width=128)
        value = self.int_to_packed(ncache_header.value, max_width=1024)

        op = ncache_header.op

        if op == NETCACHE_HOT_READ_QUERY:
            print("Received hot report for key = " + key)
            # if the netcache header has null value or if the "hot key"
            # reported doesn't exist then do not update cache
            if ncache_header.op == NETCACHE_KEY_NOT_FOUND:
                return

            self.insert(key, value)

        elif op == NETCACHE_DELETE_COMPLETE:
            print("Received query to delete key = " + key)
            self.evict(key)

        elif op == NETCACHE_UPDATE_COMPLETE:
            print("Received query to update key = " + key)
            self.update(key, value)

        else:
            print("Error: unrecognized operation field of netcache header")

    # sniff indefinitely on the interface connected to the P4 switch and, when a valid
    # netcache packet is captured, handle it via the recv_switch_updates callback
    def hot_reports_loop(self):
        cpu_port_intf = str(self.topo.get_cpu_port_intf(self.sw_name))
        sniff(iface=cpu_port_intf,
              prn=self.recv_switch_updates,
              filter="port 50000")

    def main(self):
        self.set_forwarding_table()
        self.set_value_tables()
        self.dummy_populate_vtables()
        self.hot_reports_loop()
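
A sketch of how this controller might be launched; the argument names and defaults are assumptions, not taken from the original code:

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--sw-name", default="s1", help="switch to control (assumed default)")
    parser.add_argument("--vtables-num", type=int, default=8)
    args = parser.parse_args()
    NCacheController(args.sw_name, args.vtables_num).main()
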
class packetReceiver(threading.Thread):
    def __init__(self, sw_name, program):
        threading.Thread.__init__(self)
        if program == "f":
            self.topo = Topology(
                db="../p4src_flowsize/topology.db")  #set the topology
        elif program == "i":
            self.topo = Topology(
                db="../p4src_interval/topology.db")  #set the topology
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)
        self.flow = {}
        self.flag = True
        self.init()

    def init(self):
        self.add_mirror()
        self.counter = 1
        self.logs = open("../switch_log/" + self.sw_name + ".log", "w")
        self.logs_info = open("../switch_log/" + self.sw_name + "_info.log",
                              "w")
        self.logs_info.write("SWITCH[" + self.sw_name + "]\n")
        self.logs.close()
        self.logs_info.close()

    def add_mirror(self):
        if self.cpu_port:
            self.controller.mirroring_add(
                100, self.cpu_port)  # corresponds to mirror session 100 in the P4 code
            # could more mirror sessions be added to support additional cpu ports?

    def recv_msg_cpu(self, pkt):
        ## console output starts
        #print
        #print("["+self.sw_name+"] received packet number:"+str(self.counter))
        self.counter += 1
        cpu = CPU(str(pkt))
        #ls(cpu)

        ## console output ends
        type = (cpu.flags >> 2)
        if self.flag == True:
            logs = open("../switch_log/" + self.sw_name + ".log", "w")
            self.flag = False
            if type == 0:
                logs.write("flowsize information collecting\n")
            else:
                logs.write("interval information collecting\n")

            logs.close()

        self.gen_per_packet_log(cpu)
        self.collect_log(cpu)
        if (self.counter % 1000 == 0):
            self.gen_log()

    def gen_log(self):
        logs_info = open("../switch_log/" + self.sw_name + "_info.log", "a")
        logs_info.write("[flow number: " + str(len(self.flow)) + "]\n")
        change = lambda x: '.'.join(
            [str(x / (256**i) % 256) for i in range(3, -1, -1)])

        cnt = 0
        for i in self.flow:
            cnt += self.flow[i]["packnum"]
            tmp = i.split(":")
            tmp[0] = change(int(tmp[0]))
            tmp[1] = change(int(tmp[1]))
            tmp = " : ".join(tmp)
            logs_info.write("flow " + tmp + " ")

            logs_info.write(str(sorted(self.flow[i].items())))
            logs_info.write("\n")
        logs_info.write("[packet number sum:" + str(cnt) + "]\n\n")

        logs_info.close()

    def collect_log(self, cpu):
        flow_key = str(cpu.srcAddr) + ":" + str(cpu.dstAddr) + ":" + str(
            cpu.protocol) + ":" + str(cpu.srcPort) + ":" + str(cpu.dstPort)
        if flow_key in self.flow:
            self.flow[flow_key]["packnum"] += 1
        else:
            self.flow[flow_key] = {"packnum": 1, "0->1": 0, "1->2": 0,
                                   "2->3": 0, "3->4": 0, "4->5": 0, "5->6": 0,
                                   "6->7": 0, "7+": 0}  # "7->8", "8->9", "9+" buckets disabled
        self.flow[flow_key][self.get_lev(cpu.delay)] += 1

    def get_lev(self, delay):
        time_interval = 1000
        if delay < time_interval * 1:
            return "0->1"
        elif delay < time_interval * 2:
            return "1->2"
        elif delay < time_interval * 3:
            return "2->3"
        elif delay < time_interval * 4:
            return "3->4"
        elif delay < time_interval * 5:
            return "4->5"
        elif delay < time_interval * 6:
            return "5->6"
        elif delay < time_interval * 7:
            return "6->7"
        # elif delay<time_interval*8:
        #     return "7->8"
        # elif delay<time_interval*9:
        #     return "8->9"
        else:
            return "7+"

    def gen_per_packet_log(self, cpu):
        logs = open("../switch_log/" + self.sw_name + ".log", "a")
        change = lambda x: '.'.join(
            [str(x / (256**i) % 256) for i in range(3, -1, -1)])

        srcAddr = change(cpu.srcAddr)
        dstAddr = change(cpu.dstAddr)
        tmp_delay = str(cpu.delay)
        delay = tmp_delay[-9:-6] + "s " + tmp_delay[-6:-3] + "ms " + tmp_delay[
            -3:] + "us"
        tmp_interval = str(cpu.interval)
        interval = tmp_interval[-9:-6] + "s " + tmp_interval[
            -6:-3] + "ms " + tmp_interval[-3:] + "us"
        sketch_fg = (cpu.flags >> 1) & 0x1
        has_SFH = cpu.flags & 0x1
        type = (cpu.flags >> 2) & 0x1

        logs.write('{"switch name":"' + self.sw_name + '",')
        logs.write('"packet number":"' + str(self.counter - 1) +
                   '","packet_info":{')
        logs.write('"srcAddr":"' + str(srcAddr) + '",')
        logs.write('"dstAddr":"' + str(dstAddr) + '",')
        logs.write('"protocol":"' + str(cpu.protocol) + '",')
        logs.write('"srcPort":"' + str(cpu.srcPort) + '",')
        logs.write('"dstPort":"' + str(cpu.dstPort) + '",')
        logs.write('"delay ":"' + delay + '",')
        logs.write('"interval":"' + interval)
        logs.write('"timestamp":' + str(time.time()))
        if type == 0:
            logs.write('",' + '"using sketch":"' + str(sketch_fg) + '",')
            logs.write('"bring SFH":' + str(bool(has_SFH)))
        else:
            logs.write('",' + '"using sketch":"' + str(sketch_fg) + '",')
            logs.write('"bring MIH":' + str(bool(has_SFH)))
        logs.write(" }}\n")
        logs.close()

    def run_cpu_port_loop(self):
        cpu_port_intf = str(
            self.topo.get_cpu_port_intf(self.sw_name).replace("eth0", "eth1"))
        # the cpu has two ports; a second thread could be used to sniff the other one
        print(cpu_port_intf)
        print
        print(sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu))

    def run(self):
        self.run_cpu_port_loop()
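
Since packetReceiver subclasses threading.Thread, one receiver per switch can be started and joined; the switch names and the "f" program flag below are illustrative assumptions:

if __name__ == "__main__":
    # "f" selects the flowsize P4 program, "i" the interval one (see __init__)
    receivers = [packetReceiver(sw, "f") for sw in ("s1", "s2")]
    for r in receivers:
        r.start()
    for r in receivers:
        r.join()
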
class L2Controller(object):
    def __init__(self, sw_name):
        self.topo = Topology(db="topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.init()

    def init(self):
        self.controller.reset_state()
        self.add_broadcast_groups()
        self.add_mirror()

    def add_mirror(self):
        if self.cpu_port:
            self.controller.mirroring_add(100, self.cpu_port)

    def add_broadcast_groups(self):
        interfaces_to_port = self.topo[
            self.sw_name]["interfaces_to_port"].copy()
        # filter lo and cpu port
        interfaces_to_port.pop('lo', None)
        interfaces_to_port.pop(self.topo.get_cpu_port_intf(self.sw_name), None)

        mc_grp_id = 1
        rid = 0
        for ingress_port in interfaces_to_port.values():

            port_list = interfaces_to_port.values()[:]
            del (port_list[port_list.index(ingress_port)])

            #add multicast group
            self.controller.mc_mgrp_create(mc_grp_id)

            #add multicast node group
            handle = self.controller.mc_node_create(rid, port_list)

            #associate with mc grp
            self.controller.mc_node_associate(mc_grp_id, handle)

            #fill broadcast table
            self.controller.table_add("broadcast", "set_mcast_grp",
                                      [str(ingress_port)], [str(mc_grp_id)])

            mc_grp_id += 1
            rid += 1

    def learn_route(self, learning_data):
        for mac_addr, ingress_port in learning_data:
            print "mac: %012X ingress_port: %s " % (mac_addr, ingress_port)
            self.controller.table_add("smac", "NoAction", [str(mac_addr)])
            self.controller.table_add("dmac", "forward", [str(mac_addr)],
                                      [str(ingress_port)])

    def learn_connection(self, srcA, dstA, srcP, dstP):
        print("========== UPDATING CONNECTION ==========")
        connection = srcA
        connection = connection << 32
        connection = connection | dstA
        connection = connection << 16
        connection = connection | srcP
        connection = connection << 16
        connection = connection | dstP
        self.controller.table_add("tcp_forward", "NoAction", [str(connection)],
                                  [])

        connection = dstA
        connection = connection << 32
        connection = connection | srcA
        connection = connection << 16
        connection = connection | dstP
        connection = connection << 16
        connection = connection | srcP
        self.controller.table_add("tcp_forward", "NoAction", [str(connection)],
                                  [])

        print("========== UPDATE FINISHED ==========")

    def recv_msg_cpu(self, pkt):
        packet = Ether(str(pkt))

        if packet.type == 0x1234:
            learning = CpuRoute(packet.payload)
            print("got a packet of type route")
            self.learn_route([(learning.macAddr, learning.ingress_port)])
        if packet.type == 0xF00D:
            learning = CpuCookie(packet.payload)
            print("got a packet of type cookie")
            self.learn_connection(learning.srcAddr, learning.dstAddr,
                                  learning.srcPort, learning.dstPort)

    def run_cpu_port_loop(self):
        cpu_port_intf = str(
            self.topo.get_cpu_port_intf(self.sw_name).replace("eth0", "eth1"))
        sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu)
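
For reference, the 96-bit match key that learn_connection assembles shift by shift is equivalent to this closed form (a helper added purely for illustration):

def connection_key(src_addr, dst_addr, src_port, dst_port):
    # (srcAddr << 64) | (dstAddr << 32) | (srcPort << 16) | dstPort
    return (src_addr << 64) | (dst_addr << 32) | (src_port << 16) | dst_port

# e.g. connection_key(0x0A000001, 0x0A000002, 1234, 80) gives the key installed
# in the tcp_forward table for the 10.0.0.1:1234 -> 10.0.0.2:80 direction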