Example #1
class NCacheController(object):
    def __init__(self, sw_name, vtables_num=8):
        self.topo = Topology(db="../p4/topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(self.sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.custom_calcs = self.controller.get_custom_crc_calcs()
        self.sketch_register_num = len(self.custom_calcs)

        self.vtables = []
        self.vtables_num = vtables_num

        # create a pool of ids (as many as the total number of keys);
        # this pool is used to assign an index to each key, which in turn
        # indexes the cached key counter and the validity register
        self.ids_pool = range(0, VTABLE_ENTRIES * VTABLE_SLOT_SIZE)

        # array of bitmaps marking available slots per cache line
        # as 0 bits and occupied slots as 1 bits
        self.mem_pool = [0] * VTABLE_ENTRIES

        # number of memory slots used (useful for lfu eviction policy)
        self.used_mem_slots = 0

        # dictionary storing the value table index, bitmap and counter/validity
        # register index in the P4 switch that corresponds to each key
        self.key_map = {}

        self.setup()

        #self.out_of_band_test()

    def inform_server(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.connect(UNIX_CHANNEL)
        except socket.error:
            # unable to contact server for cache operation completion
            return

        sock.sendall(CACHE_INSERT_COMPLETE)

    # reports the value of counters for each cached key
    # (used only for debugging purposes)
    def report_counters(self):
        for key, val in self.key_map.items():
            vt_idx, bitmap, key_idx = val

            res = self.controller.counter_read(CACHED_KEYS_COUNTER, key_idx)
            if res.packets != 0:
                print("[COUNTER] key = " + key + " [ " + str(res.packets) +
                      " ]")

    # periodically reset registers pertaining to query statistics module of the
    # P4 switch (count-min sketch registers, bloom filters and counters)
    def periodic_registers_reset(self):
        t = threading.Timer(STATISTICS_REFRESH_INTERVAL,
                            self.periodic_registers_reset)
        t.daemon = True
        t.start()

        # before resetting registers, check whether the cache is utilized above
        # a threshold (e.g. 80%) and evict keys using the lfu policy if needed
        self.cache_lfu_eviction(threshold=0.8, sampling=0.2, to_remove=0.5)

        # reset bloom filter related registers
        for i in range(BLOOMF_REGISTERS_NUM):
            self.controller.register_reset(BLOOMF_REG_PREFIX + str(i + 1))

        # reset count min sketch related registers
        for i in range(SKETCH_REGISTERS_NUM):
            self.controller.register_reset(SKETCH_REG_PREFIX + str(i + 1))

        # reset counter register storing the query frequency of each cached item
        self.controller.counter_reset(CACHED_KEYS_COUNTER)

        print("[INFO]: Reset query statistics registers.")

    # the controller periodically checks if the memory used has exceeded a given threshold
    # (e.g. 80%) and if that is the case then it evicts keys according to an approximated
    # LFU policy inspired by Redis (https://redis.io/topics/lru-cache)
    def cache_lfu_eviction(self, threshold=0.8, sampling=0.2, to_remove=0.5):

        # if the threshold has not been surpassed then nothing to do
        if self.used_mem_slots <= (threshold * len(self.mem_pool) *
                                   VTABLE_SLOT_SIZE):
            return

        n_samples = int(sampling * len(self.key_map.items()))

        samples = random.sample(self.key_map.items(), n_samples)

        # read the counter for each sample and store them in an array
        evict_list = []
        for key, val in samples:
            x, y, cnt_idx = self.key_map[key]
            counter = self.controller.counter_read(CACHED_KEYS_COUNTER,
                                                   cnt_idx).packets
            evict_list.append((key, counter))

        # sort the array, pick the K smallest counters and evict their keys
        # (this could be done more efficiently with quickselect)
        evict_list.sort(key=lambda pair: pair[1])

        for i in range(int(to_remove * n_samples)):
            curr = evict_list[i]
            self.evict(curr[0])

    def setup(self):
        if self.cpu_port:
            self.controller.mirroring_add(CONTROLLER_MIRROR_SESSION,
                                          self.cpu_port)

        # create custom hash functions for count min sketch and bloom filters
        self.set_crc_custom_hashes()
        self.create_hashes()

        # set a daemon to periodically reset registers
        self.periodic_registers_reset()

        # spawn a new thread to serve incoming udp connections
        # (i.e. hot reports from the switch)
        #udp_t = threading.Thread(target=self.hot_reports_loop)
        #udp_t.start()

    def set_crc_custom_hashes(self):
        for i, (custom_crc32, width) in enumerate(
                sorted(self.custom_calcs.items())):
            self.controller.set_crc32_parameters(custom_crc32,
                                                 crc32_polinomials[i],
                                                 0xffffffff, 0xffffffff, True,
                                                 True)

    def create_hashes(self):
        self.hashes = []
        for i in range(self.sketch_register_num):
            self.hashes.append(
                Crc(32, crc32_polinomials[i], True, 0xffffffff, True,
                    0xffffffff))

    # set a static allocation scheme for l2 forwarding where the mac address of
    # each host is associated with the port connecting this host to the switch
    def set_forwarding_table(self):
        for host in self.topo.get_hosts_connected_to(self.sw_name):
            port = self.topo.node_to_node_port_num(self.sw_name, host)
            host_mac = self.topo.get_host_mac(host)
            self.controller.table_add("l2_forward", "set_egress_port",
                                      [str(host_mac)], [str(port)])

    def set_value_tables(self):
        for i in range(self.vtables_num):
            self.controller.table_add("vtable_" + str(i),
                                      "process_array_" + str(i), ['1'], [])

    # this function manages the mapping between slots in register arrays
    # and the cached items by implementing the First Fit algorithm described
    # in the Memory Management section (4.4.2) of the NetCache paper
    def first_fit(self, key, value_size):

        if value_size <= 0:
            return None
        if key in self.key_map:
            return None

        # number of slots needed to fit the value (ceiling division)
        n_slots = (value_size + VTABLE_SLOT_SIZE - 1) / VTABLE_SLOT_SIZE

        for idx in range(len(self.mem_pool)):
            old_bitmap = self.mem_pool[idx]
            n_zeros = 8 - bin(old_bitmap).count("1")

            if n_zeros >= n_slots:
                cnt = 0
                bitmap = 0
                for i in reversed(range(8)):
                    if cnt >= n_slots:
                        break

                    if not self.bit_is_set(old_bitmap, i):
                        bitmap = bitmap | (1 << i)
                        cnt += 1

                # mark the n_slots highest-order free bits as 1 bits because we
                # assigned them to the new key and they are now allocated
                self.mem_pool[idx] = old_bitmap | bitmap

                self.used_mem_slots += bin(bitmap).count("1")

                return (idx, bitmap)

        return None
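
    # Worked example (illustrative; assumes the default vtables_num = 8 and
    # VTABLE_SLOT_SIZE = 8): a 20-byte value needs ceil(20 / 8) = 3 slots.
    # On a fully free cache line (bitmap 0b00000000) first_fit marks the three
    # highest-order bits and returns (idx, 0b11100000), i.e. the slots that
    # back vtable_0, vtable_1 and vtable_2.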

    # converts a list of 1s and 0s (represented as strings) to a bitmap using
    # bitwise operations (this intermediate representation of a list of 1s and
    # 0s is used to avoid low level bitwise logic inside the core
    # implementation logic)
    def convert_to_bitmap(self, strlist, bitmap_len):
        bitmap = 0
        # supports only bitmaps with multiple of 8 bits size
        if bitmap_len % 8 != 0:
            return bitmap
        for i in strlist:
            bitmap = bitmap << 1
            bitmap = bitmap | int(i)

        return bitmap
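
    # Illustration (values are arbitrary examples, assuming an 8-bit bitmap):
    #   convert_to_bitmap(['1', '0', '1', '0', '0', '0', '0', '0'], 8) -> 0b10100000 (160)
    #   convert_to_bitmap(['1', '1'], 8)                               -> 0b00000011 (3)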

    # this function checks whether the k-th bit of a given number is set
    def bit_is_set(self, n, k):
        if n & (1 << k):
            return True
        else:
            return False

    # given a key and its associated value, we update the lookup table on
    # the switch and we also update the value registers with the value
    # given as argument (stored in multiple slots)
    def insert(self, key, value, cont=True):
        # find where to put the value for given key
        mem_info = self.first_fit(key, len(value))

        # if the key already exists or no space is available then stop
        if mem_info is None:
            return

        vt_index, bitmap = mem_info

        # keep track of number of bytes of the value written so far
        cnt = 0

        # store the value of the key in the vtables of the switch while
        # incrementally storing a part of the value at each value table
        # if the corresponding bit of the bitmap is set
        for i in range(self.vtables_num):

            if self.bit_is_set(bitmap, self.vtables_num - i - 1):
                partial_val = value[cnt:cnt + VTABLE_SLOT_SIZE]
                self.controller.register_write(VTABLE_NAME_PREFIX + str(i),
                                               vt_index,
                                               self.str_to_int(partial_val))

                cnt += VTABLE_SLOT_SIZE

        # allocate an id from the pool to index the counter and validity register
        # (we take the last element of the list because Python lists are
        # optimized for appending to and popping from the end)
        key_index = self.ids_pool.pop()

        # add the new key to the cache lookup table of the p4 switch
        self.controller.table_add(
            NETCACHE_LOOKUP_TABLE, "set_lookup_metadata",
            [str(self.str_to_int(key))],
            [str(bitmap), str(vt_index),
             str(key_index)])

        # mark cache entry for this key as valid
        self.controller.register_write("cache_status", key_index, 1)

        self.key_map[key] = vt_index, bitmap, key_index

        # inform the server about the successful cache insertion
        if cont:
            self.inform_server()

        print("Inserted key-value pair to cache: (" + key + "," + value + ")")

    # converts a string to its bytes representation and then returns its
    # integer representation with the width specified by the int_width argument
    # (seems hacky due to the restriction to use python 2.7)
    def str_to_int(self, x, int_width=VTABLE_SLOT_SIZE):
        if len(x) > int_width:
            print "Error: Overflow while converting string to int"

        # add padding with 0x00 if input string size less than int_width
        bytearr = bytearray(int_width - len(x))
        bytearr.extend(x.encode('utf-8'))
        return struct.unpack(">Q", bytearr)[0]
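
    # Illustration (assumes VTABLE_SLOT_SIZE == 8, which the ">Q" unpack format
    # above requires): str_to_int("hi") left-pads to 8 bytes and yields
    # 0x6869 == 26729.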

    # given an arbitrarily sized integer and the max width (in bits) of the
    # integer, returns the string representation of the number (stripped of
    # any '\x00' bytes); network byte order is assumed
    def int_to_packed(self, int_val, max_width=128, word_size=32):
        num_words = max_width / word_size
        words = self.int_to_words(int_val, num_words, word_size)

        fmt = '>%dI' % (num_words)
        return struct.pack(fmt, *words).strip('\x00')

    # split up an arbitrarily sized integer into words (needed to work around
    # struct.pack's inability to convert integers wider than 8 bytes)
    def int_to_words(self, int_val, num_words, word_size):
        max_int = 2**(word_size * num_words) - 1
        max_word_size = 2**word_size - 1
        words = []
        for _ in range(num_words):
            word = int_val & max_word_size
            words.append(int(word))
            int_val >>= word_size
        words.reverse()
        return words
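
    # Illustration: int_to_words(0x11223344AABBCCDD, 2, 32) returns
    # [0x11223344, 0xAABBCCDD]; int_to_packed then packs the words big-endian
    # and strips any leading/trailing '\x00' bytes.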

    # update the value of the given key with the new value given as argument
    # (by allowing updates to also be done by the controller, the client is
    # able to update keys with values bigger than the previous one; the
    # netcache paper does not address this restriction)
    def update(self, key, value):
        # if key is not in cache then nothing to do
        if key not in self.key_map:
            return

        # update key-value pair by removing old pair and inserting new one
        self.evict(key)
        self.insert(key, value)

    # evict given key from the cache by deleting its associated entries in
    # action tables of the switch, by deallocating its memory space and by
    # marking the cache entry as valid once the deletion is completed
    def evict(self, key):

        if key not in self.key_map:
            return

        # delete entry from the lookup_table
        entry_handle = self.controller.get_handle_from_match(
            NETCACHE_LOOKUP_TABLE, [
                str(self.str_to_int(key)),
            ])

        if entry_handle is not None:
            self.controller.table_delete(NETCACHE_LOOKUP_TABLE, entry_handle)

        # delete mapping of key from controller's dictionary
        vt_idx, bitmap, key_idx = self.key_map[key]
        del self.key_map[key]

        # deallocate space from memory pool
        self.mem_pool[vt_idx] = self.mem_pool[vt_idx] ^ bitmap
        self.used_mem_slots = self.used_mem_slots - bin(bitmap).count("1")
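
        # e.g. if the cache line bitmap was 0b11101100 and this key owned
        # bitmap 0b11100000, the XOR above leaves 0b00001100, freeing the
        # three slots that belonged to the key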

        # free the id used to index the validity/counter register and append
        # it back to the id pool of the controller
        self.ids_pool.append(key_idx)

        # mark cache entry as valid again (should be the last thing to do)
        self.controller.register_write("cache_status", key_idx, 1)

    # used for testing purposes and static population of cache
    def dummy_populate_vtables(self):
        test_values_l = [
            "alpha", "beta", "gamma", "delta", "epsilon", "zeta", "hita",
            "theta", "yiota", "kappa", "lambda", "meta"
        ]
        test_keys_l = [
            "one", "two", "three", "four", "five", "six", "seven", "eight",
            "nine", "ten", "eleven", "twelve"
        ]
        for i in range(11):
            self.insert(test_keys_l[i], test_values_l[i], False)

    # handles reports from the switch corresponding to hot keys, updates to
    # key-value pairs or deletions - this function receives a packet, extracts
    # its netcache header and manipulates the cache based on the operation
    # field of the netcache header (callback function)
    def recv_switch_updates(self, pkt):
        print("Received message from switch")

        # extract netcache header information
        if pkt.haslayer(UDP):
            ncache_header = NetcacheHeader(pkt[UDP].payload)
        elif pkt.haslayer(TCP):
            ncache_header = NetcacheHeader(pkt[TCP].payload)
        else:
            # packets without a UDP/TCP layer carry no netcache header
            return

        key = self.int_to_packed(ncache_header.key, max_width=128)
        value = self.int_to_packed(ncache_header.value, max_width=1024)

        op = ncache_header.op

        if op == NETCACHE_HOT_READ_QUERY:
            print("Received hot report for key = " + key)
            # if the netcache header has null value or if the "hot key"
            # reported doesn't exist then do not update cache
            if ncache_header.op == NETCACHE_KEY_NOT_FOUND:
                return

            self.insert(key, value)

        elif op == NETCACHE_DELETE_COMPLETE:
            print("Received query to delete key = " + key)
            self.evict(key)

        elif op == NETCACHE_UPDATE_COMPLETE:
            print("Received query to update key = " + key)
            self.update(key, value)

        else:
            print("Error: unrecognized operation field of netcache header")

    # sniff indefinitely on the interface connected to the P4 switch and when a valid
    # netcache packet is captured, handle it via a callback to recv_switch_updates
    def hot_reports_loop(self):
        cpu_port_intf = str(self.topo.get_cpu_port_intf(self.sw_name))
        sniff(iface=cpu_port_intf,
              prn=self.recv_switch_updates,
              filter="port 50000")

    def main(self):
        self.set_forwarding_table()
        self.set_value_tables()
        self.dummy_populate_vtables()
        self.hot_reports_loop()
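
# Minimal usage sketch (hypothetical entry point; the default switch name "s1"
# and the argument handling below are assumptions, not part of the class above):
if __name__ == "__main__":
    import sys
    sw_name = sys.argv[1] if len(sys.argv) > 1 else "s1"
    controller = NCacheController(sw_name)
    controller.main()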
Example #2
class BlinkController:

    def __init__(self, topo_db, sw_name, ip_controller, port_controller, log_dir, \
        monitoring=True, routing_file=None):

        self.topo = Topology(db=topo_db)
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)
        self.controller.reset_state()
        self.log_dir = log_dir

        print 'connecting to ', ip_controller, port_controller
        # Socket used to communicate with the controller
        self.sock_controller = socket.socket(socket.AF_INET,
                                             socket.SOCK_STREAM)
        server_address = (ip_controller, port_controller)
        self.sock_controller.connect(server_address)
        print 'Connected!'

        # Send the switch name to the controller
        self.sock_controller.sendall(str(sw_name))

        self.make_logging()

        if monitoring:
            # Monitoring scheduler
            self.t_sched = sched_timer.RepeatingTimer(10, 0.5, self.scheduling)
            self.t_sched.start()

        self.mapping_dic = {}
        tmp = list(self.topo.get_hosts()) + list(self.topo.get_p4switches())
        self.mapping_dic = {k: v for v, k in enumerate(tmp)}
        self.log.info(str(self.mapping_dic))
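        # Illustration (node names are hypothetical): with hosts h1, h2 and
        # switch s1, mapping_dic would look like {'h1': 0, 'h2': 1, 's1': 2},
        # i.e. every node name maps to a small integer id.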

        self.routing_file = routing_file
        print 'routing_file ', routing_file
        if self.routing_file is not None:
            json_data = open(self.routing_file)
            self.topo_routing = json.load(json_data)

    def make_logging(self):
        # Logger for the pipeline
        logger.setup_logger('p4_to_controller', self.log_dir+'/p4_to_controller_'+ \
            str(self.sw_name)+'.log', level=logging.INFO)
        self.log = logging.getLogger('p4_to_controller')

        # Logger for the sliding window
        logger.setup_logger('p4_to_controller_sw', self.log_dir+'/p4_to_controller_'+ \
            str(self.sw_name)+'_sw.log', level=logging.INFO)
        self.log_sw = logging.getLogger('p4_to_controller_sw')

        # Logger for the rerouting
        logger.setup_logger('p4_to_controller_rerouting', self.log_dir+'/p4_to_controller_'+ \
            str(self.sw_name)+'_rerouting.log', level=logging.INFO)
        self.log_rerouting = logging.getLogger('p4_to_controller_rerouting')

        # Logger for the Flow Selector
        logger.setup_logger('p4_to_controller_fs', self.log_dir+'/p4_to_controller_'+ \
            str(self.sw_name)+'_fs.log', level=logging.INFO)
        self.log_fs = logging.getLogger('p4_to_controller_fs')

    def scheduling(self):

        for host in list(self.topo.get_hosts()):
            prefix = self.topo.get_host_ip(host) + '/24'

            # Print log about the sliding window
            for id_prefix in [
                    self.mapping_dic[host] * 2, self.mapping_dic[host] * 2 + 1
            ]:
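                # each host/prefix owns two register indexes: an even one for
                # its 'customer' entry and an odd one for its
                # 'customer_provider_peer' entry (see the bgp_type selection
                # in the rerouting loop below)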

                with HiddenPrints():
                    sw_time = float(
                        self.controller.register_read('sw_time',
                                                      index=id_prefix)) / 1000.
                    sw_index = self.controller.register_read('sw_index',
                                                             index=id_prefix)
                    sw_sum = self.controller.register_read('sw_sum',
                                                           index=id_prefix)
                self.log_sw.info('sw_time\t' + host + '\t' + prefix + '\t' +
                                 str(id_prefix) + '\t' + str(sw_time))
                self.log_sw.info('sw_index\t' + host + '\t' + prefix + '\t' +
                                 str(id_prefix) + '\t' + str(sw_index))

                if sw_sum >= 32:
                    self.log_sw.info('sw_sum\t' + host + '\t' + prefix + '\t' +
                                     str(id_prefix) + '\t' + str(sw_sum) +
                                     '\tREROUTING')
                else:
                    self.log_sw.info('sw_sum\t' + host + '\t' + prefix + '\t' +
                                     str(id_prefix) + '\t' + str(sw_sum))

                sw = []
                tmp = 'sw ' + host + ' ' + prefix + ' ' + str(id_prefix) + '\t'
                for i in range(0, 10):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'sw', (id_prefix * 10) + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_sw.info(str(tmp))

        # Print log about rerouting
        for host in list(self.topo.get_hosts()):
            prefix = self.topo.get_host_ip(host) + '/24'

            for id_prefix in [
                    self.mapping_dic[host] * 2, self.mapping_dic[host] * 2 + 1
            ]:

                with HiddenPrints():
                    nh_avaibility_1 = self.controller.register_read(
                        'nh_avaibility_1', index=id_prefix)
                    nh_avaibility_2 = self.controller.register_read(
                        'nh_avaibility_2', index=id_prefix)
                    nh_avaibility_3 = self.controller.register_read(
                        'nh_avaibility_3', index=id_prefix)
                    nbflows_progressing_2 = self.controller.register_read(
                        'nbflows_progressing_2', index=id_prefix)
                    nbflows_progressing_3 = self.controller.register_read(
                        'nbflows_progressing_3', index=id_prefix)
                    rerouting_ts = self.controller.register_read(
                        'rerouting_ts', index=id_prefix)
                    threshold = self.controller.register_read(
                        'threshold_registers', index=id_prefix)

                self.log_rerouting.info('nh_avaibility\t'+host+'\t'+prefix+'\t'+ \
                str(id_prefix)+'\t'+str(nh_avaibility_1)+'\t'+ \
                str(nh_avaibility_2)+'\t'+str(nh_avaibility_3))
                self.log_rerouting.info('nbflows_progressing\t'+host+'\t'+prefix+'\t'+ \
                str(id_prefix)+'\t'+str(nbflows_progressing_2)+'\t'+ \
                str(nbflows_progressing_3))
                self.log_rerouting.info('rerouting_ts\t'+host+'\t'+prefix+'\t'+ \
                str(id_prefix)+'\t'+str(rerouting_ts))
                self.log_rerouting.info('threshold\t'+host+'\t'+prefix+'\t'+ \
                str(id_prefix)+'\t'+str(threshold))

                nexthop_str = ''
                nha = [nh_avaibility_1, nh_avaibility_2, nh_avaibility_3]
                i = 0
                if self.routing_file is not None:
                    bgp_type = 'customer' if id_prefix % 2 == 0 else 'customer_provider_peer'
                    if bgp_type not in self.topo_routing['switches'][
                            self.sw_name]['prefixes'][host]:
                        nexthop_str = 'NoPathAvailable'
                    else:
                        if len(self.topo_routing['switches'][self.sw_name]
                               ['prefixes'][host][bgp_type]) == 2:
                            self.topo_routing['switches'][self.sw_name][
                                'prefixes'][host][bgp_type].append(
                                    self.topo_routing['switches'][self.sw_name]
                                    ['prefixes'][host][bgp_type][-1])
                        for nexthop in self.topo_routing['switches'][
                                self.sw_name]['prefixes'][host][bgp_type]:
                            tmp = 'y' if nha[i] == 0 else 'n'
                            nexthop_str = nexthop_str + str(
                                nexthop) + '(' + tmp + ')\t'
                            i += 1
                        nexthop_str = nexthop_str[:-1]
                self.log_rerouting.info('nexthop\t'+host+'\t'+prefix+'\t'+ \
                str(id_prefix)+'\t'+str(nexthop_str))

        # Print log about the flow selector
        for host in list(self.topo.get_hosts()):
            prefix = self.topo.get_host_ip(host) + '/24'

            for id_prefix in [
                    self.mapping_dic[host] * 2, self.mapping_dic[host] * 2 + 1
            ]:

                sw = []
                tmp = 'fs_key ' + host + ' ' + prefix + ' ' + str(
                    id_prefix) + '\t'
                for i in range(0, 64):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'flowselector_key', 64 * id_prefix + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_fs.info(str(tmp))

                sw = []
                tmp = 'fs ' + host + ' ' + prefix + ' ' + str(id_prefix) + '\t'
                for i in range(0, 64):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'flowselector_ts', 64 * id_prefix + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_fs.info(str(tmp))

                sw = []
                tmp = 'fs_last_ret ' + host + ' ' + prefix + ' ' + str(
                    id_prefix) + '\t'
                for i in range(0, 64):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'flowselector_last_ret', 64 * id_prefix + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_fs.info(str(tmp))

                sw = []
                tmp = 'fs_last_ret_bin ' + host + ' ' + prefix + ' ' + str(
                    id_prefix) + '\t'
                for i in range(0, 64):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'flowselector_last_ret_bin',
                                64 * id_prefix + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_fs.info(str(tmp))

                sw = []
                tmp = 'fs_fwloops ' + host + ' ' + prefix + ' ' + str(
                    id_prefix) + '\t'
                for i in range(0, 64):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'flowselector_fwloops', 64 * id_prefix + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_fs.info(str(tmp))

                sw = []
                tmp = 'fs_correctness ' + host + ' ' + prefix + ' ' + str(
                    id_prefix) + '\t'
                for i in range(0, 64):
                    with HiddenPrints():
                        binvalue = int(
                            self.controller.register_read(
                                'flowselector_correctness',
                                64 * id_prefix + i))
                    tmp = tmp + str(binvalue) + ','
                    sw.append(binvalue)
                tmp = tmp[:-1]
                self.log_fs.info(str(tmp))

    def forwarding(self):
        p4switches = self.topo.get_p4switches()
        interfaces_to_node = p4switches[self.sw_name]['interfaces_to_node']

        for k, v in interfaces_to_node.items():

            try:
                dst_mac = self.topo.get_hosts()[v][self.sw_name]['mac']
            except KeyError:
                dst_mac = self.topo.get_p4switches()[v][self.sw_name]['mac']

            src_mac = p4switches[self.sw_name][v]['mac']
            outport = p4switches[self.sw_name]['interfaces_to_port'][
                p4switches[self.sw_name][v]['intf']]

            self.log.info('table add send set_nh ' + str(self.mapping_dic[v]) +
                          ' => ' + str(outport) + ' ' + str(src_mac) + ' ' +
                          str(dst_mac))
            self.controller.table_add(
                'send', 'set_nh', [str(self.mapping_dic[v])],
                [str(outport), str(src_mac),
                 str(dst_mac)])

    def run(self):

        sock_list = [self.sock_controller]
        controller_data = ''

        while True:
            inready, outready, excepready = select.select(sock_list, [], [])

            for sock in inready:
                if sock == self.sock_controller:
                    data_tmp = ''
                    toreturn = None

                    try:
                        data_tmp = sock.recv(100000000)
                    except socket.error as e:
                        err = e.args[0]
                        if not (err == errno.EAGAIN
                                or err == errno.EWOULDBLOCK):
                            print 'p4_to_controller: ', e
                            sock.close()
                            sock = None

                    if len(data_tmp) > 0:
                        controller_data += data_tmp

                        next_data = ''
                        while len(controller_data) > 0 and controller_data[-1] != '\n':
                            next_data = controller_data[-1] + next_data
                            controller_data = controller_data[:-1]

                        toreturn = controller_data
                        controller_data = next_data
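
                        # Example of the buffering above (data is illustrative):
                        # if controller_data is "table add send set_nh 1 => 2\ndo_regi",
                        # the loop peels the trailing partial line "do_regi" back
                        # into controller_data and leaves only the complete,
                        # '\n'-terminated lines in toreturn.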

                    if toreturn is not None:
                        for line in toreturn.split('\n'):
                            if line.startswith('table add '):
                                line = line.rstrip('\n').replace(
                                    'table add ', '')

                                fwtable_name = line.split(' ')[0]
                                action_name = line.split(' ')[1]

                                match_list = line.split(' => ')[0].split(
                                    ' ')[2:]
                                action_list = line.split(' => ')[1].split(' ')

                                print line
                                print fwtable_name, action_name, match_list, action_list

                                self.log.info(line)
                                self.controller.table_add(fwtable_name, action_name, \
                                    match_list, action_list)
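
                                # e.g. (illustrative, mirroring the format logged
                                # by forwarding() above):
                                #   "table add send set_nh 3 => 2 00:00:0a:00:00:01 00:00:0a:00:00:02"
                                # parses to fwtable_name='send', action_name='set_nh',
                                # match_list=['3'] and
                                # action_list=['2', '00:00:0a:00:00:01', '00:00:0a:00:00:02']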

                            if line.startswith('do_register_write'):
                                line = line.rstrip('\n')
                                linetab = line.split(' ')

                                register_name = linetab[1]
                                index = int(linetab[2])
                                value = int(linetab[3])

                                self.log.info(line)
                                self.controller.register_write(register_name, \
                                    index, value)
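
                                # e.g. (illustrative):
                                # "do_register_write threshold_registers 4 31"
                                # writes value 31 at index 4 of the
                                # 'threshold_registers' register array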

                            if line.startswith('reset_states'):
                                self.log.info('RESETTING_STATES')

                                # First stop the scheduler to avoid concurrent
                                # use of the Thrift server
                                self.t_sched.cancel()
                                while self.t_sched.running:  # Wait for the log printing to finish
                                    time.sleep(0.5)

                                time.sleep(1)

                                # Reset the state of the switch
                                self.controller.register_reset(
                                    'nh_avaibility_1')
                                self.controller.register_reset(
                                    'nh_avaibility_2')
                                self.controller.register_reset(
                                    'nh_avaibility_3')
                                self.controller.register_reset(
                                    'nbflows_progressing_2')
                                self.controller.register_reset(
                                    'nbflows_progressing_3')
                                self.controller.register_reset('rerouting_ts')
                                self.controller.register_reset(
                                    'timestamp_reference')
                                self.controller.register_reset('sw_time')
                                self.controller.register_reset('sw_index')
                                self.controller.register_reset('sw_sum')
                                self.controller.register_reset('sw')
                                self.controller.register_reset(
                                    'flowselector_key')
                                self.controller.register_reset(
                                    'flowselector_nep')
                                self.controller.register_reset(
                                    'flowselector_ts')
                                self.controller.register_reset(
                                    'flowselector_last_ret')
                                self.controller.register_reset(
                                    'flowselector_last_ret_bin')
                                self.controller.register_reset(
                                    'flowselector_correctness')
                                self.controller.register_reset(
                                    'flowselector_fwloops')

                                print self.sw_name, ' RESET.'

                                # Restart the scheduler
                                time.sleep(1)
                                self.t_sched.start()
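
# Minimal usage sketch (hypothetical entry point; the topology file, switch
# name, controller address and log directory below are assumptions, not taken
# from the class above):
if __name__ == '__main__':
    blink = BlinkController(topo_db='topology.db', sw_name='s1',
                            ip_controller='127.0.0.1', port_controller=9000,
                            log_dir='log')
    blink.forwarding()
    blink.run()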