class LearningSwitchControllerApp(object):

    def __init__(self, switchName):
        self.topo = Topology(db="topology.db")
        self.switchName = switchName
        self.thrift_port = self.topo.get_thrift_port(switchName)
        self.cpu_port = self.topo.get_cpu_port_index(self.switchName)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.init()

    def init(self):
        self.controller.reset_state()
        self.add_mcast_grp()
        self.add_mirror()

    def add_mirror(self):
        if self.cpu_port:
            self.controller.mirroring_add(MIRROR_SESSION_ID, self.cpu_port)

    def add_mcast_grp(self):
        interfaces_to_port = self.topo[self.switchName]["interfaces_to_port"].copy()
        # filter lo and cpu port
        interfaces_to_port.pop('lo', None)
        interfaces_to_port.pop(self.topo.get_cpu_port_intf(self.switchName), None)

        mc_grp_id = 1
        rid = 0
        # add multicast group
        self.controller.mc_mgrp_create(mc_grp_id)
        port_list = list(interfaces_to_port.values())
        # add multicast node group
        handle = self.controller.mc_node_create(rid, port_list)
        # associate with mc grp
        self.controller.mc_node_associate(mc_grp_id, handle)

    def learn(self, learningData):
        for macAddr, ingressPort in learningData:
            print("macAddr: %012X ingressPort: %s ", macAddr, ingressPort)
            self.controller.table_add("srcMacAddr", "NoAction", [str(macAddr)])
            self.controller.table_add("dstMacAddr", "forward", [
                                      str(macAddr)], [str(ingressPort)])

    def recv_msg_cpu(self, pkt):

        packet = Ether(bytes(pkt))
        if packet.type == L2_LEARN_ETHER_TYPE:
            cpu_header = CpuHeader(bytes(packet.payload))
            self.learn([(cpu_header.macAddr, cpu_header.ingressPort)])

    def run_cpu_port_loop(self):

        cpu_port_intf = str(self.topo.get_cpu_port_intf(
            self.switchName).replace("eth0", "eth1"))
        sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu)
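
The CpuHeader class and the L2_LEARN_ETHER_TYPE constant referenced above are not part of the snippet. A minimal Scapy definition consistent with the fields the controller reads (macAddr, ingressPort) could look like this sketch; the field widths and the ether type value are assumptions and must match the P4 program:

from scapy.all import Packet, BitField

L2_LEARN_ETHER_TYPE = 0x1234  # assumed value; must match the ether type set by the P4 program

class CpuHeader(Packet):
    name = 'CpuPacket'
    fields_desc = [BitField('macAddr', 0, 48), BitField('ingressPort', 0, 16)]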
Example 2
class FloodingController(object):

    def __init__(self, sw_name):

        self.topo = Topology(db="topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port =  self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)
        self.init()

    def init(self):

        self.controller.reset_state()
        self.fill_dmac_table()
        self.add_boadcast_groups()

    def fill_dmac_table(self):
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:01'], ['1'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:02'], ['2'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:03'], ['3'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:04'], ['4'])
        self.controller.table_set_default("dmac", "broadcast", [])

    def add_boadcast_groups(self):

        interfaces_to_port = self.topo[self.sw_name]["interfaces_to_port"].copy()
        #filter lo and cpu port
        interfaces_to_port.pop('lo', None)
        interfaces_to_port.pop(self.topo.get_cpu_port_intf(self.sw_name), None)

        mc_grp_id = 1
        rid = 0
        for ingress_port in interfaces_to_port.values():

            port_list = interfaces_to_port.values()[:]
            del(port_list[port_list.index(ingress_port)])

            #add multicast group
            self.controller.mc_mgrp_create(mc_grp_id)

            #add multicast node group
            handle = self.controller.mc_node_create(rid, port_list)

            #associate with mc grp
            self.controller.mc_node_associate(mc_grp_id, handle)

            #fill broadcast table
            self.controller.table_add("select_mcast_grp", "set_mcast_grp", [str(ingress_port)], [str(mc_grp_id)])

            mc_grp_id +=1
            rid +=1
Example 3
class myController(object):
    def __init__(self):
        self.topo = Topology(db="topology.db")
        self.controllers = {}
        self.connect_to_switches()
 
    def connect_to_switches(self):
        for p4switch in self.topo.get_p4switches():
            thrift_port = self.topo.get_thrift_port(p4switch)
            #print "p4switch:", p4switch, "thrift_port:", thrift_port
            self.controllers[p4switch] = SimpleSwitchAPI(thrift_port)   
 
    def recv_msg_cpu(self, pkt):
        print "interface:", pkt.sniffed_on
        print "summary:", pkt.summary()
        global val1, val2

        if TCP in pkt and pkt[TCP].flags == 2:      # SYN
            src = pkt.sprintf('{IP:%IP.src%}')
            dst = pkt.sprintf('{IP:%IP.dst%}')
            count1[(src, dst)] += 1
            val1 = count1[(src, dst)]
            print "count1[", src, ",", dst, "]=", count1[(src, dst)]
        if TCP in pkt and pkt[TCP].flags == 18:     # SYN-ACK
            src = pkt.sprintf('{IP:%IP.src%}')
            dst = pkt.sprintf('{IP:%IP.dst%}')
            count2[(dst, src)] += 1
            val2 = count2[(dst, src)]
            print "count2[", dst, ",", src, "]=", count2[(dst, src)]

        print "val1:", val1, " val2:", val2
        if (val1 - val2 >= 3) and (TCP in pkt) and pkt[TCP].flags == 2:
            src = pkt.sprintf('{IP:%IP.src%}')
            if src not in blockip:
                self.controllers["s1"].table_add("block_pkt", "_drop", [str(src)], [])
                blockip.append(src)

    def run_cpu_port_loop(self):
        cpu_interfaces = [str(self.topo.get_cpu_port_intf(sw_name).replace("eth0", "eth1")) for sw_name in self.controllers]
        sniff(iface=cpu_interfaces, prn=self.recv_msg_cpu)
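
The callback above relies on module-level state (count1, count2, val1, val2, blockip) that is not shown in the snippet. A plausible definition, which is an assumption, would be:

from collections import defaultdict

# per-(src, dst) SYN and SYN-ACK counters used to spot unanswered handshakes
count1 = defaultdict(int)
count2 = defaultdict(int)
val1 = 0
val2 = 0
blockip = []  # source addresses already blocked on s1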
Example 4
class L2Controller(object):

    def __init__(self, sw_name):

        self.topo = Topology(db="topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port =  self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.init()

    def init(self):

        self.controller.reset_state()
        self.add_boadcast_groups()
        self.add_mirror()
        #self.fill_table_test()

    def add_mirror(self):

        if self.cpu_port:
            self.controller.mirroring_add(100, self.cpu_port)

    def add_boadcast_groups(self):

        interfaces_to_port = self.topo[self.sw_name]["interfaces_to_port"].copy()
        #filter lo and cpu port
        interfaces_to_port.pop('lo', None)
        interfaces_to_port.pop(self.topo.get_cpu_port_intf(self.sw_name), None)

        mc_grp_id = 1
        rid = 0
        for ingress_port in interfaces_to_port.values():

            port_list = interfaces_to_port.values()[:]
            del(port_list[port_list.index(ingress_port)])

            #add multicast group
            self.controller.mc_mgrp_create(mc_grp_id)

            #add multicast node group
            handle = self.controller.mc_node_create(rid, port_list)

            #associate with mc grp
            self.controller.mc_node_associate(mc_grp_id, handle)

            #fill broadcast table
            self.controller.table_add("broadcast", "set_mcast_grp", [str(ingress_port)], [str(mc_grp_id)])

            mc_grp_id +=1
            rid +=1

    def fill_table_test(self):
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:01'], ['1'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:02'], ['2'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:03'], ['3'])
        self.controller.table_add("dmac", "forward", ['00:00:0a:00:00:04'], ['4'])


    def learn(self, learning_data):

        for mac_addr, ingress_port in  learning_data:
            print "mac: %012X ingress_port: %s " % (mac_addr, ingress_port)
            self.controller.table_add("smac", "NoAction", [str(mac_addr)])
            self.controller.table_add("dmac", "forward", [str(mac_addr)], [str(ingress_port)])

    def unpack_digest(self, msg, num_samples):

        digest = []
        print len(msg), num_samples
        starting_index = 32
        for sample in range(num_samples):
            mac0, mac1, ingress_port = struct.unpack(">LHH", msg[starting_index:starting_index+8])
            starting_index +=8
            mac_addr = (mac0 << 16) + mac1
            digest.append((mac_addr, ingress_port))

        return digest

    def recv_msg_digest(self, msg):

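        # a bmv2 learning notification starts with a 32-byte header carrying the
        # topic, device id, context id, list id, buffer id and number of samples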
        topic, device_id, ctx_id, list_id, buffer_id, num = struct.unpack("<iQiiQi",
                                                                          msg[:32])
        digest = self.unpack_digest(msg, num)
        self.learn(digest)

        #Acknowledge digest
        self.controller.client.bm_learning_ack_buffer(ctx_id, list_id, buffer_id)


    def run_digest_loop(self):

        sub = nnpy.Socket(nnpy.AF_SP, nnpy.SUB)
        notifications_socket = self.controller.client.bm_mgmt_get_info().notifications_socket
        sub.connect(notifications_socket)
        sub.setsockopt(nnpy.SUB, nnpy.SUB_SUBSCRIBE, '')

        while True:
            msg = sub.recv()
            self.recv_msg_digest(msg)

    def recv_msg_cpu(self, pkt):

        packet = Ether(str(pkt))

        if packet.type == 0x1234:
            cpu_header = CpuHeader(packet.payload)
            self.learn([(cpu_header.macAddr, cpu_header.ingress_port)])

    def run_cpu_port_loop(self):

        cpu_port_intf = str(self.topo.get_cpu_port_intf(self.sw_name).replace("eth0", "eth1"))
        sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu)
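
A minimal launcher for the controller above; it is only a sketch and assumes the switch name is passed on the command line and that the digest loop is the desired mode:

import sys

if __name__ == "__main__":
    controller = L2Controller(sys.argv[1])
    controller.run_digest_loop()  # or controller.run_cpu_port_loop() to sniff the CPU port instead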
Example 5
class Controller(object):
    def __init__(self, sw_name):
        self.topo = Topology(db="topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)
        self.registers = []
        self.cpu_port_intf = str(
            self.topo.get_cpu_port_intf(self.sw_name).replace("eth0", "eth1"))

    def reset_registers(self):  #Reset the values of queue size registers
        for i in range(NUMBER_OF_QUEUE_REGISTERS):
            print self.controller.register_reset("queue_lengths", i)

    def read_queue_registers(self):  #Read and print queue size registers
        self.queue_registers = []
        for i in range(NUMBER_OF_QUEUE_REGISTERS):
            self.queue_registers.append(
                self.controller.register_read("queue_lengths", i))
        print self.queue_registers

    def table_add_session(self, srcIP, dstIP, protocol, sport, dport,
                          currentPort):
        #table_add(table_name, action_name, match_keys, action_params=[], prio=None)
        self.controller.table_add("ingressTable", "sessionForward",
                                  [srcIP, dstIP, protocol, sport, dport],
                                  [str(currentPort)])

    def packet_callback(self, packet):
        global packetCounter, currentLoad, currentPort, socket1

        #currentPort = randint(2, 5)

        packet[Ether].src = "76:66:5e:20:a5:fb"

        srcIP = packet[IP].src
        dstIP = packet[IP].dst
        protocol = str(packet[IP].proto)

        if (packet[IP].proto == 6 or packet[IP].proto == 17):  #TCP, UDP
            packetCounter += 1
            print("Packet " + str(packetCounter) + " Arrived to Control Plane")
            dport = str(packet[IP].dport)
            sport = str(packet[IP].sport)

            sessionTuple = (srcIP, dstIP, protocol, dport, sport)

            if sessionDict.get(sessionTuple) is not None:  # session already exists
                # simply resend the packet; if the table entry has not been
                # added yet it will come back to the CPU
                socket1.send(str(packet))
            else:
                # record the session and add the matching table entry to the data plane
                sessionDict[sessionTuple] = currentPort
                self.table_add_session(srcIP, dstIP, protocol, sport, dport,
                                       currentPort)
                # send the original packet
                socket1.send(str(packet))

    def read_registers_loop(self):
        while True:
            self.read_queue_registers()

    def run_cpu_port_loop(self):
        sniff(iface="s1-cpu-eth1",
              filter="ip and not ether src 76:66:5e:20:a5:fb",
              prn=self.packet_callback)
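
packet_callback uses module-level state (packetCounter, currentLoad, currentPort, sessionDict, socket1) and the NUMBER_OF_QUEUE_REGISTERS constant, none of which are included in the snippet. One possible setup, assuming Scapy is used to reinject packets on the CPU interface, is:

from scapy.all import conf

NUMBER_OF_QUEUE_REGISTERS = 8  # assumed size of the queue_lengths register array

packetCounter = 0
currentLoad = 0
currentPort = 2                               # egress port assigned to new sessions
sessionDict = {}                              # (srcIP, dstIP, proto, dport, sport) -> port
socket1 = conf.L2socket(iface="s1-cpu-eth1")  # raw socket used to send packets back to the switch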
Example 6
class NCacheController(object):
    def __init__(self, sw_name, vtables_num=8):
        self.topo = Topology(db="../p4/topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(self.sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.custom_calcs = self.controller.get_custom_crc_calcs()
        self.sketch_register_num = len(self.custom_calcs)

        self.vtables = []
        self.vtables_num = vtables_num

        # create a pool of ids (as many as the total number of keys)
        # this pool is used to assign an index to each key, which in turn
        # indexes the cached key counter and the validity register
        self.ids_pool = range(0, VTABLE_ENTRIES * VTABLE_SLOT_SIZE)

        # array of bitmap, which marks available slots per cache line
        # as 0 bits and occupied slots as 1 bits
        self.mem_pool = [0] * VTABLE_ENTRIES

        # number of memory slots used (useful for lfu eviction policy)
        self.used_mem_slots = 0

        # dictionary storing the value table index, bitmap and counter/validity
        # register index in the P4 switch that corresponds to each key
        self.key_map = {}

        self.setup()

        #self.out_of_band_test()

    def inform_server(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.connect(UNIX_CHANNEL)
        except socket.error as msg:
            #print('Error: Unable to contact server for cache operation completion')
            return

        sock.sendall(CACHE_INSERT_COMPLETE)

    # reports the value of counters for each cached key
    # (used only for debugging purposes)
    def report_counters(self):
        for key, val in self.key_map.items():
            vt_idx, bitmap, key_idx = val

            res = self.controller.counter_read(CACHED_KEYS_COUNTER, key_idx)
            if res != 0:
                print("[COUNTER] key = " + key + " [ " + str(res.packets) +
                      " ]")

    # periodically reset registers pertaining to query statistics module of the
    # P4 switch (count-min sketch registers, bloom filters and counters)
    def periodic_registers_reset(self):
        t = threading.Timer(STATISTICS_REFRESH_INTERVAL,
                            self.periodic_registers_reset)
        t.daemon = True
        t.start()

        # before resetting the registers, check if the cache is utilized above a
        # threshold (e.g. 80%) and evict keys using the lfu policy if needed
        self.cache_lfu_eviction(threshold=0.8, sampling=0.2, to_remove=0.5)

        # reset bloom filter related registers
        for i in range(BLOOMF_REGISTERS_NUM):
            self.controller.register_reset(BLOOMF_REG_PREFIX + str(i + 1))

        # reset count min sketch related registers
        for i in range(SKETCH_REGISTERS_NUM):
            self.controller.register_reset(SKETCH_REG_PREFIX + str(i + 1))

        # reset counter register storing the query frequency of each cached item
        self.controller.counter_reset(CACHED_KEYS_COUNTER)

        print("[INFO]: Reset query statistics registers.")

    # the controller periodically checks whether the memory used has exceeded a given
    # threshold (e.g. 80%) and, if so, evicts keys according to an approximated
    # LFU policy inspired by Redis (https://redis.io/topics/lru-cache)
    def cache_lfu_eviction(self, threshold=0.8, sampling=0.2, to_remove=0.5):

        # if the threshold has not been surpassed then nothing to do
        if self.used_mem_slots <= (threshold * len(self.mem_pool) *
                                   VTABLE_SLOT_SIZE):
            return

        n_samples = int(sampling * len(self.key_map.items()))

        samples = random.sample(self.key_map.items(), n_samples)

        # read the counter for each sample and store them in an array
        evict_list = []
        for key, val in samples:
            x, y, cnt_idx = self.key_map[key]
            counter = self.controller.counter_read(CACHED_KEYS_COUNTER,
                                                   cnt_idx).packets
            evict_list.append((key, counter))

        # sort the array and pick the smallest K-th counters and evict their keys
        # (this could be achieved more optimally by using quickselect)
        import operator
        evict_list.sort(key=operator.itemgetter(1))

        for i in range(int(to_remove * n_samples)):
            curr = evict_list[i]
            self.evict(curr[0])

    def setup(self):
        if self.cpu_port:
            self.controller.mirroring_add(CONTROLLER_MIRROR_SESSION,
                                          self.cpu_port)

        # create custom hash functions for count min sketch and bloom filters
        self.set_crc_custom_hashes()
        self.create_hashes()

        # set a daemon to periodically reset registers
        self.periodic_registers_reset()

        # spawn new thread to serve incoming udp connections
        # (i.e hot reports from the switch)
        #udp_t = threading.Thread(target=self.hot_reports_loop)
        #udp_t.start()

    def set_crc_custom_hashes(self):
        i = 0
        for custom_crc32, width in sorted(self.custom_calcs.items()):
            self.controller.set_crc32_parameters(custom_crc32,
                                                 crc32_polinomials[i],
                                                 0xffffffff, 0xffffffff, True,
                                                 True)
            i += 1

    def create_hashes(self):
        self.hashes = []
        for i in range(self.sketch_register_num):
            self.hashes.append(
                Crc(32, crc32_polinomials[i], True, 0xffffffff, True,
                    0xffffffff))

    # set a static allocation scheme for l2 forwarding where the mac address of
    # each host is associated with the port connecting this host to the switch
    def set_forwarding_table(self):
        for host in self.topo.get_hosts_connected_to(self.sw_name):
            port = self.topo.node_to_node_port_num(self.sw_name, host)
            host_mac = self.topo.get_host_mac(host)
            self.controller.table_add("l2_forward", "set_egress_port",
                                      [str(host_mac)], [str(port)])

    def set_value_tables(self):
        for i in range(self.vtables_num):
            self.controller.table_add("vtable_" + str(i),
                                      "process_array_" + str(i), ['1'], [])

    # this function manages the mapping between slots in register arrays
    # and the cached items by implementing the First Fit algorithm described in
    # the Memory Management part of section 4.4.2 of the netcache paper
    def first_fit(self, key, value_size):

        n_slots = (value_size / (VTABLE_SLOT_SIZE + 1)) + 1
        if value_size <= 0:
            return None
        if key in self.key_map:
            return None

        for idx in range(len(self.mem_pool)):
            old_bitmap = self.mem_pool[idx]
            n_zeros = 8 - bin(old_bitmap).count("1")

            if n_zeros >= n_slots:
                cnt = 0
                bitmap = 0
                for i in reversed(range(8)):
                    if cnt >= n_slots:
                        break

                    if not self.bit_is_set(old_bitmap, i):
                        bitmap = bitmap | (1 << i)
                        cnt += 1

                # mark last n_slots 0 bits as 1 bits because we assigned
                # them to the new key and they are now allocated
                self.mem_pool[idx] = old_bitmap | bitmap

                self.used_mem_slots += bin(bitmap).count("1")

                return (idx, bitmap)

        return None

    # converts a list of 1s and 0s represented as strings to a bitmap
    # using bitwise operations (this intermediate representation is used to
    # avoid low level bitwise logic inside the core implementation logic)
    def convert_to_bitmap(self, strlist, bitmap_len):
        bitmap = 0
        # supports only bitmaps with multiple of 8 bits size
        if bitmap_len % 8 != 0:
            return bitmap
        for i in strlist:
            bitmap = bitmap << 1
            bitmap = bitmap | int(i)

        return bitmap

    # this function checks whether the k-th bit of a given number is set
    def bit_is_set(self, n, k):
        if n & (1 << k):
            return True
        else:
            return False

    # given a key and its associated value, we update the lookup table on
    # the switch and we also update the value registers with the value
    # given as argument (stored in multiple slots)
    def insert(self, key, value, cont=True):
        # find where to put the value for given key
        mem_info = self.first_fit(key, len(value))

        # if key already exists or not space available then stop
        if mem_info == None:
            return

        vt_index, bitmap = mem_info

        # keep track of number of bytes of the value written so far
        cnt = 0

        # store the value of the key in the vtables of the switch while
        # incrementally storing a part of the value at each value table
        # if the corresponding bit of the bitmap is set
        for i in range(self.vtables_num):

            if self.bit_is_set(bitmap, self.vtables_num - i - 1):
                partial_val = value[cnt:cnt + VTABLE_SLOT_SIZE]
                self.controller.register_write(VTABLE_NAME_PREFIX + str(i),
                                               vt_index,
                                               self.str_to_int(partial_val))

                cnt += VTABLE_SLOT_SIZE

        # allocate an id from the pool to index the counter and validity register
        # (we take the last element of the list because python lists are optimized
        # for inserting and removing elements at the end)
        key_index = self.ids_pool.pop()

        # add the new key to the cache lookup table of the p4 switch
        self.controller.table_add(
            NETCACHE_LOOKUP_TABLE, "set_lookup_metadata",
            [str(self.str_to_int(key))],
            [str(bitmap), str(vt_index),
             str(key_index)])

        # mark cache entry for this key as valid
        self.controller.register_write("cache_status", key_index, 1)

        self.key_map[key] = vt_index, bitmap, key_index

        # inform the server about the successful cache insertion
        if cont:
            self.inform_server()

        print("Inserted key-value pair to cache: (" + key + "," + value + ")")

    # converts a string to a bytes representation and afterwards returns
    # its integer representation of width specified by argument int_width
    # (seems hacky due to restriction to use python2.7)
    def str_to_int(self, x, int_width=VTABLE_SLOT_SIZE):
        if len(x) > int_width:
            print "Error: Overflow while converting string to int"

        # add padding with 0x00 if input string size less than int_width
        bytearr = bytearray(int_width - len(x))
        bytearr.extend(x.encode('utf-8'))
        return struct.unpack(">Q", bytearr)[0]

    # given an arbitrarily sized integer and the max width (in bits) of the integer,
    # it returns the string representation of the number (also stripping it of
    # any '\x00' bytes); network byte order is assumed
    def int_to_packed(self, int_val, max_width=128, word_size=32):
        num_words = max_width / word_size
        words = self.int_to_words(int_val, num_words, word_size)

        fmt = '>%dI' % (num_words)
        return struct.pack(fmt, *words).strip('\x00')

    # split up an arbitrary sized integer to words (needed to hack
    # around struct.pack limitation to convert to byte any integer
    # greater than 8 bytes)
    def int_to_words(self, int_val, num_words, word_size):
        max_int = 2**(word_size * num_words) - 1
        max_word_size = 2**word_size - 1
        words = []
        for _ in range(num_words):
            word = int_val & max_word_size
            words.append(int(word))
            int_val >>= word_size
        words.reverse()
        return words

    # update the value of the given key with the new value given as argument
    # (by allowing updates also to be done by the controller, the client is
    # also able to update keys with values bigger than the previous one)
    # in netcache paper this restriction is not resolved
    def update(self, key, value):
        # if key is not in cache then nothing to do
        if key not in self.key_map:
            return

        # update key-value pair by removing old pair and inserting new one
        self.evict(key)
        self.insert(key, value)

    # evict given key from the cache by deleting its associated entries in
    # action tables of the switch, by deallocating its memory space and by
    # marking the cache entry as valid once the deletion is completed
    def evict(self, key):

        if key not in self.key_map:
            return

        # delete entry from the lookup_table
        entry_handle = self.controller.get_handle_from_match(
            NETCACHE_LOOKUP_TABLE, [
                str(self.str_to_int(key)),
            ])

        if entry_handle is not None:
            self.controller.table_delete(NETCACHE_LOOKUP_TABLE, entry_handle)

        # delete mapping of key from controller's dictionary
        vt_idx, bitmap, key_idx = self.key_map[key]
        del self.key_map[key]

        # deallocate space from memory pool
        self.mem_pool[vt_idx] = self.mem_pool[vt_idx] ^ bitmap
        self.used_mem_slots = self.used_mem_slots - bin(bitmap).count("1")

        # free the id used to index the validity/counter register and append
        # it back to the id pool of the controller
        self.ids_pool.append(key_idx)

        # mark cache entry as valid again (should be the last thing to do)
        self.controller.register_write("cache_status", key_idx, 1)

    # used for testing purposes and static population of cache
    def dummy_populate_vtables(self):
        test_values_l = [
            "alpha", "beta", "gamma", "delta", "epsilon", "zeta", "hita",
            "theta", "yiota", "kappa", "lambda", "meta"
        ]
        test_keys_l = [
            "one", "two", "three", "four", "five", "six", "seven", "eight",
            "nine", "ten", "eleven", "twelve"
        ]
        cnt = 0
        for i in range(11):
            self.insert(test_keys_l[i], test_values_l[i], False)

    # handling reports from the switch corresponding to hot keys, updates to
    # key-value pairs or deletions - this function receives a packet, extracts
    # its netcache header and manipulates cache based on the operation field
    # of the netcache header (callback function)
    def recv_switch_updates(self, pkt):
        print("Received message from switch")

        # extract netcache header information
        if pkt.haslayer(UDP):
            ncache_header = NetcacheHeader(pkt[UDP].payload)
        elif pkt.haslayer(TCP):
            ncache_header = NetcacheHeader(pkt[TCP].payload)

        key = self.int_to_packed(ncache_header.key, max_width=128)
        value = self.int_to_packed(ncache_header.value, max_width=1024)

        op = ncache_header.op

        if op == NETCACHE_HOT_READ_QUERY:
            print("Received hot report for key = " + key)
            # if the netcache header has null value or if the "hot key"
            # reported doesn't exist then do not update cache
            if ncache_header.op == NETCACHE_KEY_NOT_FOUND:
                return

            self.insert(key, value)

        elif op == NETCACHE_DELETE_COMPLETE:
            print("Received query to delete key = " + key)
            self.evict(key)

        elif op == NETCACHE_UPDATE_COMPLETE:
            print("Received query to update key = " + key)
            self.update(key, value)

        else:
            print("Error: unrecognized operation field of netcache header")

    # sniff infinitely the interface connected to the P4 switch and when a valid netcache
    # packet is captured, handle the packet via a callback to recv_switch_updates function
    def hot_reports_loop(self):
        cpu_port_intf = str(self.topo.get_cpu_port_intf(self.sw_name))
        sniff(iface=cpu_port_intf,
              prn=self.recv_switch_updates,
              filter="port 50000")

    def main(self):
        self.set_forwarding_table()
        self.set_value_tables()
        self.dummy_populate_vtables()
        self.hot_reports_loop()
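
# The NetcacheHeader class used in recv_switch_updates above is not shown in the
# snippet; a hedged Scapy sketch matching the fields the controller reads (op, a
# 128-bit key, a 1024-bit value) might look like the following; field order and
# widths are assumptions:
from scapy.all import Packet, ByteField, BitField

class NetcacheHeader(Packet):
    name = 'NetcacheHeader'
    fields_desc = [ByteField('op', 0),
                   BitField('key', 0, 128),
                   BitField('value', 0, 1024)]
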
class BaseController(object):
    """A base P4 switch controller that your controllers probably want to inherit from.

    Implements the CPU loop. You must override the
    `recv_packet(self, packet)` method to use it.
    """
    def __init__(self, sw_name, topology_db_file="./topology.db"):
        self.topo = Topology(db=topology_db_file)
        # print(self.topo)
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPIAsyncWrapper(self.thrift_port)

    @classmethod
    @defer.inlineCallbacks
    def get_initialised(cls, sw_name, *args, **kwargs):
        obj = cls(sw_name, *args, **kwargs)
        yield obj._before_init()

        # TODO this actually wasn't a great idea and I shouldn't be
        # doing it, because it breaks expectations. But it works :D
        for mcls in reversed(cls.__mro__):
            if 'init' in mcls.__dict__:
                yield defer.maybeDeferred(mcls.init, obj)

        defer.returnValue(obj)

    def _before_init(self):
        return self.controller.reset_state()

    def recv_packet(self, msg):
        raise NotImplementedError(
            "Packet from switch received, but recv_packet has not been implemented"
        )

    @defer.inlineCallbacks
    def _consume_from_packet_queue(self):
        msg = yield self.packet_queue.get()
        self.recv_packet(msg)
        reactor.callLater(0, self._consume_from_packet_queue)

    @print_method_call
    def start_sniffer_thread(self):
        self.packet_queue = defer.DeferredQueue()
        cpu_port_intf = str(
            self.topo.get_cpu_port_intf(self.sw_name).replace("eth0", "eth1"))
        self.sniffer_thread = SnifferThread(reactor, self.packet_queue,
                                            cpu_port_intf)
        self.sniffer_thread.daemon = True  # die when the main thread dies
        self.sniffer_thread.start()

        workers = 4
        for i in range(workers):
            self._consume_from_packet_queue()

    @defer.inlineCallbacks
    def init(self):
        """Reminder: init() is Special."""
        if self.cpu_port:
            yield self.controller.mirroring_add(
                p4settings['CPU_PORT_MIRROR_ID'], self.cpu_port)

    @classmethod
    def run(cls, sw_name):
        """Deprecated."""
        task.react((lambda reactor, sw_name: cls.get_initialised(sw_name)),
                   [sw_name])
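
A minimal subclass of BaseController; this is a sketch, and MyController and its behaviour are hypothetical. Because get_initialised() walks the MRO and calls every class's init(), the subclass init() does not call the base one:

class MyController(BaseController):

    def init(self):
        # BaseController.init (the CPU-port mirroring setup) is run separately
        # by get_initialised, so here we only start consuming CPU packets
        self.start_sniffer_thread()

    def recv_packet(self, packet):
        print("CPU packet: %s" % packet.summary())

# typical usage (an assumption):
#   MyController.get_initialised("s1")
#   reactor.run()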
Example 8
class PacketLossController(object):

    def __init__(self, num_hashes=3):

        self.topo = Topology(db="topology.db")
        self.controllers = {}
        self.num_hashes = num_hashes

        # gets a controller API for each switch: {"s1": controller, "s2": controller...}
        self.connect_to_switches()
        # creates the 3 hashes that the p4 switch will use
        self.create_local_hashes()

        # initializes the switch
        # resets all registers, configures the 3 x 2 hash functions
        # reads the registers
        # populates the tables and mirroring id
        self.init()
        self.registers = {}

    def init(self):
        self.reset_all_registers()
        self.set_crc_custom_hashes_all()
        self.read_registers()
        self.configure_switches()

    def connect_to_switches(self):
        for p4switch in self.topo.get_p4switches():
            thrift_port = self.topo.get_thrift_port(p4switch)
            self.controllers[p4switch] = SimpleSwitchAPI(thrift_port)

    def configure_switches(self):

        for sw, controller in self.controllers.items():
            # adds the cpu port mirror session
            controller.mirroring_add(100, 3)

            # set the basic forwarding rules
            controller.table_add("forwarding", "set_egress_port", ["1"], ["2"])
            controller.table_add("forwarding", "set_egress_port", ["2"], ["1"])

            # set the remove header rules when there is a host in a port
            direct_hosts = self.topo.get_hosts_connected_to(sw)
            for host in direct_hosts:
                port = self.topo.node_to_node_port_num(sw,host)
                controller.table_add("remove_loss_header", "remove_header", [str(port)], [])

    def set_crc_custom_hashes_all(self):
        for sw_name in self.controllers:
            self.set_crc_custom_hashes(sw_name)

    def set_crc_custom_hashes(self, sw_name):
        custom_calcs = sorted(self.controllers[sw_name].get_custom_crc_calcs().items())
        i = 0
        # Set the first 3 hashes for the um
        for custom_crc32, width in custom_calcs[:self.num_hashes]:
            self.controllers[sw_name].set_crc32_parameters(custom_crc32, crc32_polinomials[i], 0xffffffff, 0xffffffff, True,
                                                           True)
            i += 1

        i = 0
        # Sets the 3 hashes for the dm; they have to be the same, thus we reuse the same polynomial indexes
        for custom_crc32, width in custom_calcs[self.num_hashes:]:
            self.controllers[sw_name].set_crc32_parameters(custom_crc32, crc32_polinomials[i], 0xffffffff, 0xffffffff,
                                                           True, True)
            i += 1

    def create_local_hashes(self):
        self.hashes = []
        for i in range(self.num_hashes):
            self.hashes.append(Crc(32, crc32_polinomials[i], True, 0xffffffff, True, 0xffffffff))

    def reset_all_registers(self):
        for sw, controller in self.controllers.items():
            for register in controller.get_register_arrays():
                controller.register_reset(register)

    def reset_registers(self, sw, stream, port, batch_id):
        start = (batch_id * REGISTER_BATCH_SIZE) + ((port-1) * REGISTER_PORT_SIZE)
        end = start + REGISTER_PORT_SIZE

        for register in self.controllers[sw].get_register_arrays():
            if stream in register:
                self.controllers[sw].register_write(register, [start, end], 0)

    def flow_to_bytestream(self, flow):
        # flow fields are: srcip , dstip, srcport, dstport, protocol, ip id
        return socket.inet_aton(flow[0]) + socket.inet_aton(flow[1]) + struct.pack(">HHBH",flow[2], flow[3], flow[4], flow[5])

    def read_registers(self):
        # reads all the registers
        self.registers = {sw: {} for sw in self.controllers.keys()}
        for sw, controller in self.controllers.items():
            for register in controller.get_register_arrays():
                self.registers[sw][register] = (controller.register_read(register))

    def extract_register_information(self, sw, stream, port, batch_id):
        # reads the region of a um or dm register: uses port, batch id.
        start = (batch_id * REGISTER_BATCH_SIZE) + ((port-1) * REGISTER_PORT_SIZE)
        end = start + REGISTER_PORT_SIZE
        res = {}
        for name, values in self.registers[sw].items():
            if stream in name:
                res[name] = values[start:end]

        return res

    def decode_meter_pair(self, um_registers, dm_registers):

        # combine the two meters: subtract the packet counters and xor the flow-id registers
        counters = [x - y for x, y in zip(um_registers['MyIngress.um_counter'], dm_registers['MyIngress.dm_counter'])]
        ip_src = [x ^ y for x, y in zip(um_registers['MyIngress.um_ip_src'], dm_registers['MyIngress.dm_ip_src'])]
        ip_dst = [x ^ y for x, y in zip(um_registers['MyIngress.um_ip_dst'], dm_registers['MyIngress.dm_ip_dst'])]
        ports_proto_id = [x ^ y for x, y in zip(um_registers['MyIngress.um_ports_proto_id'], dm_registers['MyIngress.dm_ports_proto_id'])]
        dropped_packets = set()
        while 1 in counters:
            i = counters.index(1)
            tmp_src = ip_src[i]
            tmp_dst = ip_dst[i]
            src = socket.inet_ntoa(struct.pack("!I", tmp_src))
            dst = socket.inet_ntoa(struct.pack("!I", tmp_dst))
            misc = ports_proto_id[i]
            id  = misc & 0xffff
            proto = misc >> 16 & 0xff
            dst_port = misc >> 24 & 0xffff
            src_port = misc >> 40 & 0xffff
            flow = (src, dst, src_port, dst_port, proto, id)

            # get the three indexes
            flow_stream = self.flow_to_bytestream(flow)
            index0 = self.hashes[0].bit_by_bit_fast(flow_stream) % REGISTER_PORT_SIZE
            index1 = self.hashes[1].bit_by_bit_fast(flow_stream) % REGISTER_PORT_SIZE
            index2 = self.hashes[2].bit_by_bit_fast(flow_stream) % REGISTER_PORT_SIZE

            # clear these entries everywhere and continue
            counters[index0] -= 1
            counters[index1] -= 1
            counters[index2] -= 1

            ip_src[index0] ^= tmp_src
            ip_src[index1] ^= tmp_src
            ip_src[index2] ^= tmp_src

            ip_dst[index0] ^= tmp_dst
            ip_dst[index1] ^= tmp_dst
            ip_dst[index2] ^= tmp_dst

            ports_proto_id[index0] ^= misc
            ports_proto_id[index1] ^= misc
            ports_proto_id[index2] ^= misc

            # if the meters are out of sync we stop decoding this round
            # (mainly caused by the limited amount of buffering the switch allows)
            if any(x < 0 for x in counters):
                return dropped_packets

            dropped_packets.add(flow)

        return dropped_packets


    def verify_link(self, sw1, sw2, batch_id):

        sw1_to_sw2_interface = self.topo.node_to_node_port_num(sw1, sw2)
        sw2_to_sw1_interface = self.topo.node_to_node_port_num(sw2, sw1)

        sw1_um = self.extract_register_information(sw1, 'um', sw1_to_sw2_interface, batch_id)
        sw2_dm = self.extract_register_information(sw2, 'dm', sw2_to_sw1_interface, batch_id)

        dropped_packets = self.decode_meter_pair(sw1_um, sw2_dm)

        # clean registers
        self.reset_registers(sw1, 'um', sw1_to_sw2_interface, batch_id)
        self.reset_registers(sw2, 'dm', sw2_to_sw1_interface, batch_id)

        # report
        if dropped_packets:
            print "Packets dropped: {} at link {}->{}:".format(len(dropped_packets), sw1, sw2)
            print "Details:"
            for packet in dropped_packets:
                print packet

    def check_sw_links(self, sw, batch_id):

        # small safety delay; increase or decrease it depending on the batch timing
        time.sleep(0.25)

        # read all registers since it's a small topology
        self.read_registers()

        # Process the right links and clean registers
        neighboring_p4_switches = [x for x in self.topo.get_neighbors(sw) if
                                   x in self.topo.get_p4switches()]

        for neighboring_switch in neighboring_p4_switches:
            self.verify_link(sw, neighboring_switch, batch_id)

    # When a batch_id changes the controller gets triggered
    def recv_msg_cpu(self, pkt):
        interface = pkt.sniffed_on
        print interface
        switch_name = interface.split("-")[0]
        packet = Ether(str(pkt))
        if packet.type == 0x1234:
            loss_header = LossHeader(packet.payload)
            batch_id = loss_header.batch_id >> 7
            print switch_name, batch_id
            self.check_sw_links(switch_name, batch_id)

    def run_cpu_port_loop(self):
        cpu_interfaces = [str(self.topo.get_cpu_port_intf(sw_name).replace("eth0", "eth1")) for sw_name in self.controllers]
        sniff(iface=cpu_interfaces, prn=self.recv_msg_cpu)
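
The LossHeader class parsed in recv_msg_cpu is not included in the snippet. Since only batch_id is read (with its top bit extracted via >> 7), a hedged Scapy sketch could be the following; the real header very likely carries more fields:

from scapy.all import Packet, ByteField

class LossHeader(Packet):
    name = 'LossHeader'
    fields_desc = [ByteField('batch_id', 0)]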
Example 9
class packetReceicer(threading.Thread):
    def __init__(self, sw_name, program):
        threading.Thread.__init__(self)
        if program == "f":
            self.topo = Topology(
                db="../p4src_flowsize/topology.db")  #set the topology
        elif program == "i":
            self.topo = Topology(
                db="../p4src_interval/topology.db")  #set the topology
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)
        self.flow = {}
        self.flag = True
        self.init()

    def init(self):
        self.add_mirror()
        self.counter = 1
        self.logs = open("../switch_log/" + self.sw_name + ".log", "w")
        self.logs_info = open("../switch_log/" + self.sw_name + "_info.log",
                              "w")
        self.logs_info.write("SWITCH[" + self.sw_name + "]\n")
        self.logs.close()
        self.logs_info.close()

    def add_mirror(self):
        if self.cpu_port:
            self.controller.mirroring_add(
                100, self.cpu_port)  # corresponds to mirror session 100 in the p4 code
            # could more mirror sessions be added to support additional cpu ports?

    def recv_msg_cpu(self, pkt):
        ## console output starts
        #print
        #print("["+self.sw_name+"] received packet number:"+str(self.counter))
        self.counter += 1
        cpu = CPU(str(pkt))
        #ls(cpu)

        ## console output ends
        type = (cpu.flags >> 2)
        if self.flag == True:
            logs = open("../switch_log/" + self.sw_name + ".log", "w")
            self.flag = False
            if type == 0:
                logs.write("flowsize information collecting\n")
            else:
                logs.write("interval information collecting\n")

            logs.close()

        self.gen_per_packet_log(cpu)
        self.collect_log(cpu)
        if (self.counter % 1000 == 0):
            self.gen_log()

    def gen_log(self):
        logs_info = open("../switch_log/" + self.sw_name + "_info.log", "a")
        logs_info.write("[flow number: " + str(len(self.flow)) + "]\n")
        change = lambda x: '.'.join(
            [str(x / (256**i) % 256) for i in range(3, -1, -1)])

        cnt = 0
        for i in self.flow:
            cnt += self.flow[i]["packnum"]
            tmp = i.split(":")
            tmp[0] = change(int(tmp[0]))
            tmp[1] = change(int(tmp[1]))
            tmp = " : ".join(tmp)
            logs_info.write("flow " + tmp + " ")

            logs_info.write(str(sorted(self.flow[i].items())))
            logs_info.write("\n")
        logs_info.write("[packet number sum:" + str(cnt) + "]\n\n")

        logs_info.close()

    def collect_log(self, cpu):
        flow_key = str(cpu.srcAddr) + ":" + str(cpu.dstAddr) + ":" + str(
            cpu.protocol) + ":" + str(cpu.srcPort) + ":" + str(cpu.dstPort)
        if flow_key in self.flow:
            self.flow[flow_key]["packnum"] += 1
            self.flow[flow_key][self.get_lev(cpu.delay)] += 1
        else:
            self.flow[flow_key]={"packnum":1,"0->1":0,"1->2":0,\
                "2->3":0,"3->4":0,"4->5":0,"5->6":0,"6->7":0\
                ,"7+":0}#"7->8":0,"8->9":0,"9+":0}
            self.flow[flow_key][self.get_lev(cpu.delay)] += 1

    def get_lev(self, delay):
        time_interval = 1000
        if delay < time_interval * 1:
            return "0->1"
        elif delay < time_interval * 2:
            return "1->2"
        elif delay < time_interval * 3:
            return "2->3"
        elif delay < time_interval * 4:
            return "3->4"
        elif delay < time_interval * 5:
            return "4->5"
        elif delay < time_interval * 6:
            return "5->6"
        elif delay < time_interval * 7:
            return "6->7"
        # elif delay<time_interval*8:
        #     return "7->8"
        # elif delay<time_interval*9:
        #     return "8->9"
        else:
            return "7+"

    def gen_per_packet_log(self, cpu):
        logs = open("../switch_log/" + self.sw_name + ".log", "a")
        change = lambda x: '.'.join(
            [str(x / (256**i) % 256) for i in range(3, -1, -1)])

        srcAddr = change(cpu.srcAddr)
        dstAddr = change(cpu.dstAddr)
        tmp_delay = str(cpu.delay)
        delay = tmp_delay[-9:-6] + "s " + tmp_delay[-6:-3] + "ms " + tmp_delay[
            -3:] + "us"
        tmp_interval = str(cpu.interval)
        interval = tmp_interval[-9:-6] + "s " + tmp_interval[
            -6:-3] + "ms " + tmp_interval[-3:] + "us"
        sketch_fg = (cpu.flags >> 1) & 0x1
        has_SFH = cpu.flags & 0x1
        type = (cpu.flags >> 2) & 0x1

        logs.write('{"switch name":"' + self.sw_name + '",')
        logs.write('"packet number":"' + str(self.counter - 1) +
                   '","packet_info":{')
        logs.write('"srcAddr":"' + str(srcAddr) + '",')
        logs.write('"dstAddr":"' + str(dstAddr) + '",')
        logs.write('"protocol":"' + str(cpu.protocol) + '",')
        logs.write('"srcPort":"' + str(cpu.srcPort) + '",')
        logs.write('"dstPort":"' + str(cpu.dstPort) + '",')
        logs.write('"delay ":"' + delay + '",')
        logs.write('"interval":"' + interval)
        logs.write('"timestamp":' + str(time.time()))
        if type == 0:
            logs.write('",' + '"using sketch":"' + str(sketch_fg) + '",')
            logs.write('"bring SFH":' + str(bool(has_SFH)))
        else:
            logs.write('",' + '"using sketch":"' + str(sketch_fg) + '",')
            logs.write('"bring MIH":' + str(bool(has_SFH)))
        logs.write(" }}\n")
        logs.close()

    def run_cpu_port_loop(self):
        cpu_port_intf = str(
            self.topo.get_cpu_port_intf(self.sw_name).replace("eth0", "eth1"))
        # the cpu has two ports; two threads could be used to sniff both
        print(cpu_port_intf)
        print
        print(sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu))

    def run(self):
        self.run_cpu_port_loop()
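
A hypothetical way to launch the receiver thread above, assuming one thread per switch and the program selector ("f" or "i") passed on the command line:

import sys

if __name__ == "__main__":
    receiver = packetReceicer(sys.argv[1], sys.argv[2])
    receiver.start()
    receiver.join()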
Example 10
class L2Controller(object):
    def __init__(self, sw_name):
        self.topo = Topology(db="topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.init()

    def init(self):
        self.controller.reset_state()
        self.add_boadcast_groups()
        self.add_mirror()

    def add_mirror(self):
        if self.cpu_port:
            self.controller.mirroring_add(100, self.cpu_port)

    def add_boadcast_groups(self):
        interfaces_to_port = self.topo[
            self.sw_name]["interfaces_to_port"].copy()
        # filter lo and cpu port
        interfaces_to_port.pop('lo', None)
        interfaces_to_port.pop(self.topo.get_cpu_port_intf(self.sw_name), None)

        mc_grp_id = 1
        rid = 0
        for ingress_port in interfaces_to_port.values():

            port_list = interfaces_to_port.values()[:]
            del (port_list[port_list.index(ingress_port)])

            #add multicast group
            self.controller.mc_mgrp_create(mc_grp_id)

            #add multicast node group
            handle = self.controller.mc_node_create(rid, port_list)

            #associate with mc grp
            self.controller.mc_node_associate(mc_grp_id, handle)

            #fill broadcast table
            self.controller.table_add("broadcast", "set_mcast_grp",
                                      [str(ingress_port)], [str(mc_grp_id)])

            mc_grp_id += 1
            rid += 1

    def learn_route(self, learning_data):
        for mac_addr, ingress_port in learning_data:
            print "mac: %012X ingress_port: %s " % (mac_addr, ingress_port)
            self.controller.table_add("smac", "NoAction", [str(mac_addr)])
            self.controller.table_add("dmac", "forward", [str(mac_addr)],
                                      [str(ingress_port)])

    def learn_connection(self, srcA, dstA, srcP, dstP):
        print("========== UPDATING CONNECTION ==========")
        connection = srcA
        connection = connection << 32
        connection = connection | dstA
        connection = connection << 16
        connection = connection | srcP
        connection = connection << 16
        connection = connection | dstP
        self.controller.table_add("tcp_forward", "NoAction", [str(connection)],
                                  [])

        connection = dstA
        connection = connection << 32
        connection = connection | srcA
        connection = connection << 16
        connection = connection | dstP
        connection = connection << 16
        connection = connection | srcP
        self.controller.table_add("tcp_forward", "NoAction", [str(connection)],
                                  [])

        print("========== UPDATE FINISHED ==========")

    def recv_msg_cpu(self, pkt):
        packet = Ether(str(pkt))

        if packet.type == 0x1234:
            learning = CpuRoute(packet.payload)
            print("got a packet of type route")
            self.learn_route([(learning.macAddr, learning.ingress_port)])
        if packet.type == 0xF00D:
            learning = CpuCookie(packet.payload)
            print("got a packet of type cookie")
            self.learn_connection(learning.srcAddr, learning.dstAddr,
                                  learning.srcPort, learning.dstPort)

    def run_cpu_port_loop(self):
        cpu_port_intf = str(
            self.topo.get_cpu_port_intf(self.sw_name).replace("eth0", "eth1"))
        sniff(iface=cpu_port_intf, prn=self.recv_msg_cpu)
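
CpuRoute and CpuCookie are not defined in the snippet. Hedged Scapy sketches consistent with the fields the controller reads (macAddr/ingress_port for routes, srcAddr/dstAddr/srcPort/dstPort for cookies) might look like this; field widths are assumptions:

from scapy.all import Packet, BitField

class CpuRoute(Packet):
    name = 'CpuRoute'
    fields_desc = [BitField('macAddr', 0, 48), BitField('ingress_port', 0, 16)]

class CpuCookie(Packet):
    name = 'CpuCookie'
    fields_desc = [BitField('srcAddr', 0, 32), BitField('dstAddr', 0, 32),
                   BitField('srcPort', 0, 16), BitField('dstPort', 0, 16)]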