def print_paths_to_database(paths, reverse_map, table_name):
    """Insert one row per discovered path into *table_name*.

    Each row stores: input header (JSON), input port, output port, the
    space-separated port path, the number of ports, the applied rule IDs
    and their count.

    paths -- list of propagation-graph nodes, each with "visits" (ports
        traversed), "port" (final output port) and "hdr" (header object).
    reverse_map -- unused here; kept for interface compatibility.
    table_name -- SQLite table with 7 columns matching the VALUES below.
    """
    insert_string = "INSERT INTO %s VALUES (?, ?, ?, ?, ?, ?, ?)" % table_name

    queries = []
    for p_node in paths:
        # Visited ports followed by the final output port; trailing
        # space preserved from the original format.
        port_tokens = ["%d" % port for port in p_node["visits"]]
        port_tokens.append("%d" % p_node["port"])
        path_string = " ".join(port_tokens) + " "
        port_count = len(p_node["visits"]) + 1

        # Rule ID is the middle element of each (node, rule, surface) triple.
        rl_id = "".join(r + " " for (n, r, s) in p_node["hdr"].applied_rule_ids)
        rule_count = len(p_node["hdr"].applied_rule_ids)

        input_port = p_node["visits"][0]
        output_port = p_node["port"]
        # Trace the output header space backwards so the stored header is
        # the one to inject at the input port to exercise this path.
        output_hs = p_node["hdr"].copy()
        applied_rule_ids = list(output_hs.applied_rule_ids)
        input_hs = trace_hs_back(applied_rule_ids, output_hs, output_port)[0]
        header_string = json.dumps(
            parse_hs(cisco_router(1).hs_format, input_hs.hs_list[0]))

        queries.append((header_string, input_port, output_port, path_string,
                        port_count, rl_id, rule_count))

    # 6000s busy timeout; ensure the connection is closed even on error.
    conn = sqlite3.connect(DATABASE_FILE, 6000)
    try:
        # One batched pass instead of a Python-level execute per row.
        conn.executemany(insert_string, queries)
        conn.commit()
    finally:
        conn.close()
def load_augmented_tf_to_nusmv(replication_factor,dir_path):
    '''
    For Model Checking Project.
    Creates NuSMV file from transfer function objects of replicated Stanford network.

    Loads the per-replica router transfer functions plus root and
    backbone topology from dir_path and returns the populated NuSMV
    object.
    '''
    nusmv = NuSMV()
    router = cisco_router(1)
    offset = router.PORT_TYPE_MULTIPLIER * router.OUTPUT_PORT_TYPE_CONST
    nusmv.set_output_port_offset(offset)
    (port_map, _reverse_map) = load_port_to_id_map(dir_path)

    # One NuSMV transition block per router per replica; collect the
    # edge ports of every replica along the way.
    end_ports = []
    for replica in range(1, replication_factor + 1):
        for router_name in rtr_names:
            tf_obj = TF(1)
            tf_obj.load_object_from_file(
                "%s/%s%d.tf" % (dir_path, router_name, replica))
            nusmv.generate_nusmv_trans(tf_obj, [])
        end_ports += get_end_ports(port_map, "%d" % replica)

    # Root router transitions.
    tf_obj = TF(1)
    tf_obj.load_object_from_file("%s/root.tf" % dir_path)
    nusmv.generate_nusmv_trans(tf_obj, [])

    # Backbone topology transitions; edge ports act as terminals.
    tf_obj = TF(1)
    tf_obj.load_object_from_file("%s/backbone_topology.tf" % dir_path)
    nusmv.generate_nusmv_trans(tf_obj, end_ports)
    nusmv.generate_nusmv_input()

    return nusmv
def load_augmented_tf_to_nusmv(replication_factor, dir_path):
    '''
    For Model Checking Project.
    Creates NuSMV file from transfer function objects of replicated Stanford network.

    replication_factor -- number of network replicas; per-replica router
        files are expected as "<dir_path>/<rtr_name><replica>.tf".
    dir_path -- directory holding the .tf files and port_map.txt.
    Returns the populated NuSMV object.
    '''
    nusmv = NuSMV()
    cs = cisco_router(1)
    # Output-port ids are the input id plus this fixed offset.
    nusmv.set_output_port_offset(cs.PORT_TYPE_MULTIPLIER *
                                 cs.OUTPUT_PORT_TYPE_CONST)
    (port_map, port_reverse_map) = load_port_to_id_map(dir_path)
    end_ports = []
    for replicate in range(1, replication_factor + 1):
        for rtr_name in rtr_names:
            f = TF(1)
            f.load_object_from_file("%s/%s%d.tf" %
                                    (dir_path, rtr_name, replicate))
            nusmv.generate_nusmv_trans(f, [])
        # Accumulate the edge (non-linked) ports of this replica.
        end_ports_subset = get_end_ports(port_map, "%d" % replicate)
        end_ports.extend(end_ports_subset)

    f = TF(1)
    f.load_object_from_file("%s/root.tf" % (dir_path))
    nusmv.generate_nusmv_trans(f, [])

    # Backbone topology transitions; edge ports act as terminals.
    f = TF(1)
    f.load_object_from_file("%s/backbone_topology.tf" % dir_path)
    nusmv.generate_nusmv_trans(f, end_ports)
    nusmv.generate_nusmv_input()

    return nusmv
def load_port_to_id_map(path):
    '''
    load the map from port ID to name of box-port name.

    Reads "<path>/port_map.txt", where a "$rtr" line starts a new router
    section and "name:id" lines map a port name to its numeric ID.

    Returns (name_to_id, id_to_name):
      name_to_id[rtr][port_name] -> int port id
      id_to_name maps both the input-port id and the derived output-port
      id (id + PORT_TYPE_MULTIPLIER * OUTPUT_PORT_TYPE_CONST), as
      strings, back to "rtr-port_name".
    '''
    id_to_name = {}
    name_to_id = {}  # renamed from "map": don't shadow the builtin
    rtr = ""
    cs = cisco_router(1)
    # Context manager guarantees the file is closed (the original
    # leaked the handle on every call).
    with open("%s/port_map.txt" % path, 'r') as f:
        for line in f:
            if line.startswith("$"):
                rtr = line[1:].strip()
                name_to_id[rtr] = {}
            elif line.strip() != "":
                # The original tested `line != ""`, which is always true
                # for lines read from a file (they keep their newline),
                # so a blank line crashed on tokens[1]. Strip first.
                tokens = line.strip().split(":")
                name_to_id[rtr][tokens[0]] = int(tokens[1])
                id_to_name[tokens[1]] = "%s-%s" % (rtr, tokens[0])
                out_port = int(tokens[1]) + \
                    cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST
                id_to_name["%s" % out_port] = "%s-%s" % (rtr, tokens[0])
    return (name_to_id, id_to_name)
def load_tf_to_nusmv():
    '''
    For Model Checking Project.
    Creates NuSMV file from transfer function objects of Stanford network.

    Loads each router's transfer function from tf_stanford_backbone/
    plus the backbone topology, then returns the populated NuSMV object.
    '''
    nusmv = NuSMV()
    cs = cisco_router(1)
    # Output-port ids are the input id plus this fixed offset.
    nusmv.set_output_port_offset(cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST)
    for rtr_name in rtr_names:
        f = TF(1)
        f.load_object_from_file("tf_stanford_backbone/%s.tf"%rtr_name)
        nusmv.generate_nusmv_trans(f, [])

    (port_map,port_reverse_map) = load_stanford_backbone_port_to_id_map()
    # Edge ports (no inter-router link) terminate the model.
    end_ports = get_end_ports(port_map,"")
    f = TF(1)
    f.load_object_from_file("tf_stanford_backbone/backbone_topology.tf")
    nusmv.generate_nusmv_trans(f,end_ports)
    nusmv.generate_nusmv_input()

    return nusmv
def load_port_to_id_map(path):
    '''
    load the map from port ID to name of box-port name.

    Reads "<path>/port_map.txt", where a "$rtr" line starts a new router
    section and "name:id" lines map a port name to its numeric ID.
    Returns (map, id_to_name): map[rtr][port_name] -> int id, and
    id_to_name maps both the input-port id and the derived output-port
    id (as strings) back to "rtr-port_name".
    '''
    f = open("%s/port_map.txt" % path, 'r')
    id_to_name = {}
    map = {}
    rtr = ""
    cs = cisco_router(1)
    for line in f:
        if line.startswith("$"):
            rtr = line[1:].strip()
            map[rtr] = {}
        # NOTE(review): `line != ""` is always true for lines read from
        # a file (they keep their newline); a fully blank line would
        # crash on tokens[1] below -- confirm the input never has one.
        elif line != "":
            tokens = line.strip().split(":")
            map[rtr][tokens[0]] = int(tokens[1])
            id_to_name[tokens[1]] = "%s-%s" % (rtr, tokens[0])
            # Output-port id = input id + fixed type offset.
            out_port = int(
                tokens[1]
            ) + cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST
            id_to_name["%s" % out_port] = "%s-%s" % (rtr, tokens[0])
    return (map, id_to_name)
def load_tf_to_nusmv():
    '''
    For Model Checking Project.
    Creates NuSMV file from transfer function objects of Stanford network.

    Loads each router's transfer function from tf_stanford_backbone/
    plus the backbone topology, then returns the populated NuSMV object.
    '''
    nusmv = NuSMV()
    cs = cisco_router(1)
    # Output-port ids are the input id plus this fixed offset.
    nusmv.set_output_port_offset(cs.PORT_TYPE_MULTIPLIER *
                                 cs.OUTPUT_PORT_TYPE_CONST)
    for rtr_name in rtr_names:
        f = TF(1)
        f.load_object_from_file("tf_stanford_backbone/%s.tf" % rtr_name)
        nusmv.generate_nusmv_trans(f, [])

    (port_map, port_reverse_map) = load_stanford_backbone_port_to_id_map()
    # Edge ports (no inter-router link) terminate the model.
    end_ports = get_end_ports(port_map, "")
    f = TF(1)
    f.load_object_from_file("tf_stanford_backbone/backbone_topology.tf")
    nusmv.generate_nusmv_trans(f, end_ports)
    nusmv.generate_nusmv_input()

    return nusmv
# ---- Beispiel #8 (scrape artifact: example separator) ----
def print_paths_to_database(paths, reverse_map, table_name):
    """Insert one row per discovered path into *table_name*.

    Each row stores: input header (JSON), input port, output port, the
    space-separated port path, the number of ports, the applied rule
    IDs and their count.

    paths -- list of propagation-graph nodes with "visits", "port",
        and "hdr" entries.
    reverse_map -- unused here; kept for interface compatibility.
    table_name -- SQLite table with 7 columns matching the VALUES below.
    """
    # Timeout = 6000s

    insert_string = "INSERT INTO %s VALUES (?, ?, ?, ?, ?, ?, ?)" % table_name

    queries = []
    for p_node in paths:
        # Visited ports followed by the final output port.
        path_string = ""
        for port in p_node["visits"]:
            path_string += ("%d " % port)
        path_string += ("%d " % p_node["port"])
        port_count = len(p_node["visits"]) + 1

        # Rule ID is the middle element of each applied-rule triple.
        rl_id = ""
        for (n, r, s) in p_node["hdr"].applied_rule_ids:
            rl_id += (r + " ")
        rule_count = len(p_node["hdr"].applied_rule_ids)

        input_port = p_node["visits"][0]
        output_port = p_node["port"]
        # Trace the output header space backwards so the stored header
        # is the one to inject at the input port for this path.
        output_hs = p_node["hdr"].copy()
        applied_rule_ids = list(output_hs.applied_rule_ids)
        input_hs = trace_hs_back(applied_rule_ids, output_hs, output_port)[0]
        header_string = json.dumps(
            parse_hs(cisco_router(1).hs_format, input_hs.hs_list[0]))

        #header_string = byte_array_to_pretty_hs_string(input_hs.hs_list[0])
        queries.append((header_string, input_port, output_port, path_string,
                        port_count, rl_id, rule_count))

    conn = sqlite3.connect(DATABASE_FILE, 6000)
    for query in queries:
        conn.execute(insert_string, query)

    conn.commit()
    conn.close()
# ---- Beispiel #9 (scrape artifact: example separator) ----
    a special exception, as described in included LICENSE_EXCEPTION.txt.

Created on Sep 26, 2011

@author: Peyman Kazemian
'''
from headerspace.hs import *
from headerspace.tf import *
from headerspace.slice import *
from config_parser.cisco_router_parser import cisco_router
from config_parser.helper import *
from time import time
import math
import random

cs = cisco_router(1)
rtr_port_const = 100

rtr_ids = {
    "bbra_rtr": 100,
    "bbrb_rtr": 200,
    "boza_rtr": 300,
    "bozb_rtr": 400,
    "coza_rtr": 500,
    "cozb_rtr": 600,
    "goza_rtr": 700,
    "gozb_rtr": 800,
    "poza_rtr": 900,
    "pozb_rtr": 1000,
    "roza_rtr": 1100,
    "rozb_rtr": 1200,
def main():
    """Generate test packets for the Stanford backbone.

    Builds an SQLite database of network/topology rules, runs
    reachability from sampled source ports in a multiprocessing pool,
    then iteratively compresses the resulting rule lists globally.

    Flags: -p percentage of test terminals to sample, -f database
    filename under results/, -e restrict sources to edge ports only.
    """
    global src_port_ids_global
    global dst_port_ids_global
    global port_map_global
    global port_reverse_map_global
    global ntf_global
    global ttf_global
    global DATABASE_FILE

    parser = ArgumentParser(description="Generate Test Packets for stanford")
    parser.add_argument("-p", dest="percentage", type=int, default="100", help="Percentage of test terminals")
    parser.add_argument("-f", dest="filename", default="stanford.sqlite", help="Filename of the database")
    parser.add_argument("-e", action="store_true", default=False, help="Edge port only")
    args = parser.parse_args()

    DATABASE_FILE = "results/%s" % args.filename

    cs = cisco_router(1)
    # Offset converting an input-port id to its output-port id.
    output_port_addition = cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST

    # Load .tf files
    ntf_global = load_stanford_backbone_ntf()
    ttf_global = load_stanford_backbone_ttf()
    (port_map_global, port_reverse_map_global) = load_stanford_backbone_port_to_id_map()

    # Initialize the database
    if os.access(DATABASE_FILE, os.F_OK):
        os.remove(DATABASE_FILE)

    conn = sqlite3.connect(DATABASE_FILE)
    conn.execute(
        "CREATE TABLE %s (rule TEXT, input_port TEXT, output_port TEXT, action TEXT, file TEXT, line TEXT)"
        % TABLE_NETWORK_RULES
    )
    conn.execute("CREATE TABLE %s (rule TEXT, input_port TEXT, output_port TEXT)" % TABLE_TOPOLOGY_RULES)
    conn.execute(
        "CREATE TABLE %s (header TEXT, input_port INTEGER, output_port INTEGER, ports TEXT, no_of_ports INTEGER, rules TEXT, no_of_rules INTEGER)"
        % TABLE_TEST_PACKETS
    )
    conn.execute(
        "CREATE TABLE %s (header TEXT, input_port INTEGER, output_port INTEGER, ports TEXT, no_of_ports INTEGER, rules TEXT, no_of_rules INTEGER)"
        % TABLE_TEST_PACKETS_LOCALLY_COMPRESSED
    )
    conn.execute("CREATE TABLE %s (rules TEXT, no_of_rules INTEGER)" % TABLE_TEST_PACKETS_GLOBALLY_COMPRESSED)
    conn.execute("CREATE TABLE %s (rule TEXT)" % TABLE_RESULT_RULES)

    # Dump every network-transfer-function rule into the database.
    rule_count = 0
    for tf in ntf_global.tf_list:
        rule_count += len(tf.rules)
        for rule in tf.rules:
            query = "INSERT INTO %s VALUES (?, ?, ?, ?, ?, ?)" % TABLE_NETWORK_RULES
            conn.execute(
                query,
                (
                    rule["id"],
                    " ".join(map(str, rule["in_ports"])),
                    " ".join(map(str, rule["out_ports"])),
                    rule["action"],
                    rule["file"],
                    " ".join(map(str, rule["line"])),
                ),
            )
    print "Total Rules: %d" % rule_count
    conn.commit()

    # Dump topology (link) rules.
    rule_count = len(ttf_global.rules)
    for rule in ttf_global.rules:
        query = "INSERT INTO %s VALUES (?, ?, ?)" % TABLE_TOPOLOGY_RULES
        conn.execute(query, (rule["id"], " ".join(map(str, rule["in_ports"])), " ".join(map(str, rule["out_ports"]))))
    print "Total Links: %d" % rule_count

    # Generate all ports
    for rtr in port_map_global.keys():
        src_port_ids_global |= set(port_map_global[rtr].values())

    total_length = len(src_port_ids_global)
    if args.e == True:
        # NOTE(review): get_end_ports is defined elsewhere in this file
        # with two parameters (name_to_id, index) -- confirm a
        # zero-argument variant exists in the deployed module.
        src_port_ids_global = get_end_ports()

    # Integer division under Python 2: samples floor(p%) of the ports.
    new_length = len(src_port_ids_global) * args.percentage / 100
    src_port_ids_global = random.sample(src_port_ids_global, new_length)
    print "Total Length: %d" % total_length
    print "New Length: %d" % new_length

    print src_port_ids_global

    # Destination set = output-port ids of the sampled sources.
    for port in src_port_ids_global:
        port += output_port_addition
        dst_port_ids_global.add(port)

    # src_port_ids_global = [300013]
    # dst_port_ids_global = [320010]

    conn.commit()
    conn.close()

    # Run reachability
    start_time = time.time()

    pool = Pool()
    result = pool.map_async(find_test_packets, src_port_ids_global)

    # Close
    pool.close()
    pool.join()

    end_time = time.time()

    test_packet_count = result.get()
    total_paths = sum(test_packet_count)
    print "========== Before Compression ========="
    print "Total Paths = %d" % total_paths
    print "Average packets per port = %f" % (float(total_paths) / len(src_port_ids_global))
    print "Total Time = %fs" % (end_time - start_time)

    # Global Compressing
    start_time = time.time()

    conn = sqlite3.connect(DATABASE_FILE, 6000)
    result_rule_lists = []
    query = "SELECT rules FROM %s" % TABLE_TEST_PACKETS_LOCALLY_COMPRESSED
    rows = conn.execute(query)

    for row in rows:
        result_rule_lists.append(row[0].split())
    conn.close()

    # Compress rule lists in parallel rounds until a round shrinks the
    # set by less than 1%.
    chunk_size = 80000
    while True:
        print "Start a new round!"
        conn = sqlite3.connect(DATABASE_FILE, 6000)
        conn.execute("DROP TABLE IF EXISTS %s" % TABLE_SCRATCHPAD)
        conn.execute("CREATE TABLE %s (rules TEXT, no_of_rules INTEGER)" % TABLE_SCRATCHPAD)
        conn.commit()
        conn.close()

        start_len = len(result_rule_lists)
        print start_len

        pool = Pool()
        no_of_chunks = len(result_rule_lists) / chunk_size + 1
        rule_list_chunks = chunks(result_rule_lists, no_of_chunks)
        result = pool.map_async(rule_lists_compress, rule_list_chunks)

        # Close
        pool.close()
        pool.join()
        result.get()

        print "End of this round."

        result_rule_lists = read_rule_lists_from_database(TABLE_SCRATCHPAD)

        end_len = len(result_rule_lists)
        if float(end_len) / float(start_len) > 0.99:
            break

    end_time = time.time()

    query = "INSERT INTO %s VALUES (?, ?)" % TABLE_TEST_PACKETS_GLOBALLY_COMPRESSED
    query2 = "INSERT INTO %s VALUES (?)" % TABLE_RESULT_RULES

    total_paths = len(result_rule_lists)
    total_length = 0

    conn = sqlite3.connect(DATABASE_FILE, 6000)
    conn.execute("DROP TABLE IF EXISTS %s" % TABLE_TEST_PACKETS_GLOBALLY_COMPRESSED)
    conn.execute("CREATE TABLE %s (rules TEXT, no_of_rules INTEGER)" % TABLE_TEST_PACKETS_GLOBALLY_COMPRESSED)

    for rule_list in result_rule_lists:
        total_length += len(rule_list)
        conn.execute(query, (" ".join(rule_list), len(rule_list)))
        for rule in rule_list:
            conn.execute(query2, (rule,))

    conn.commit()
    conn.close()

    print "========== After Compression ========="
    print "Total Paths = %d" % total_paths
    print "Average packets per port = %f" % (float(total_paths) / len(src_port_ids_global))
    print "Average length of rule list = %f" % (float(total_length) / total_paths)
    print "Total Time = %fs" % (end_time - start_time)
# ---- Beispiel #11 (scrape artifact: example separator) ----
def generate_transfer_functions(settings):
    """Build per-router transfer functions (and optionally a topology
    TF) from Cisco config/table dumps described by *settings*.

    settings keys: "rtr_names", "input_path", "output_path"; optional
    "replace_vlans", "hs_format", "*_file_sfx" suffix overrides,
    "optimize_fwd_table", "fwd_table_only", "topology".
    Writes <rtr>.tf / <rtr>.tf.json, port_map.json and (if requested)
    topology.tf[.json] under settings["output_path"].
    """
    st = time()

    if ("replace_vlans" in settings.keys()):
        has_replaced_vlan = True
    else:
        has_replaced_vlan = False

    # Input file-name suffixes may be overridden per data set.
    if "arp_table_file_sfx" in settings.keys():
        arp_sfx = settings["arp_table_file_sfx"]
    else:
        arp_sfx = "_arp_table.txt"
    if "mac_table_file_sfx" in settings.keys():
        mac_sfx = settings["mac_table_file_sfx"]
    else:
        mac_sfx = "_mac_table.txt"
    if "config_file_sfx" in settings.keys():
        config_sfx = settings["config_file_sfx"]
    else:
        config_sfx = "_config.txt"
    if "spanning_tree_file_sfx" in settings.keys():
        span_sfx = settings["spanning_tree_file_sfx"]
    else:
        span_sfx = "_spanning_tree.txt"
    if "route_table_file_sfx" in settings.keys():
        route_sfx = settings["route_table_file_sfx"]
    else:
        route_sfx = "_route.txt"

    # generate transfer functions
    L = 0  # header-space length, taken from the last parsed router
    id = 1
    cs_list = {}
    for i in range(len(settings["rtr_names"])):
        rtr_name = settings["rtr_names"][i]
        cs = cisco_router(id)
        if has_replaced_vlan:
            cs.set_replaced_vlan(settings["replace_vlans"][i])
        if "hs_format" in settings.keys():
            cs.set_hs_format(settings["hs_format"])
        L = cs.hs_format["length"]
        tf = TF(L)
        tf.set_prefix_id(rtr_name)
        # Parse the router's dump files in dependency order.
        cs.read_arp_table_file("%s/%s%s" %
                               (settings["input_path"], rtr_name, arp_sfx))
        cs.read_mac_table_file("%s/%s%s" %
                               (settings["input_path"], rtr_name, mac_sfx))
        cs.read_spanning_tree_file("%s/%s%s"%\
                                   (settings["input_path"],rtr_name,span_sfx))
        cs.read_config_file("%s/%s%s" %
                            (settings["input_path"], rtr_name, config_sfx))
        cs.read_route_file("%s/%s%s" %
                           (settings["input_path"], rtr_name, route_sfx))
        if ("optimize_fwd_table" not in settings.keys() or \
            settings["optimize_fwd_table"]):
            cs.optimize_forwarding_table()
        if ("fwd_table_only" in settings.keys()
                and settings["fwd_table_only"]):
            cs.generate_port_ids_only_for_output_ports()
            cs.generate_fwd_table_tf(tf)
        else:
            cs.generate_port_ids([])
            cs.generate_transfer_function(tf)
        if (not os.path.isdir(settings["output_path"])):
            os.makedirs(settings["output_path"])
        tf.save_as_json("%s/%s.tf.json" % (settings["output_path"], rtr_name))
        tf.save_object_to_file("%s/%s.tf" %
                               (settings["output_path"], rtr_name))
        id += 1
        cs_list[rtr_name] = cs

    #generate port maps
    f = open("%s/port_map.json" % settings["output_path"], 'w')
    port_map = {}
    for rtr in cs_list.keys():
        cs = cs_list[rtr]
        port_map[rtr] = cs.port_to_id
    f.write(json.dumps(port_map))
    f.close()

    #write topology:
    if "topology" in settings.keys():
        print "===Generating Topology==="
        out_port_addition = cisco_router.PORT_TYPE_MULTIPLIER * \
              cisco_router.OUTPUT_PORT_TYPE_CONST
        topology = settings["topology"]
        tf = TF(L)
        # Each physical link becomes two unidirectional link rules.
        for (from_router, from_port, to_router, to_port) in topology:
            from_cs = cs_list[from_router]
            to_cs = cs_list[to_router]
            rule = TF.create_standard_rule(\
                          [from_cs.get_port_id(from_port) + out_port_addition],\
                            None,[to_cs.get_port_id(to_port)],\
                            None, None, "", [])
            tf.add_link_rule(rule)
            rule = TF.create_standard_rule(\
                          [to_cs.get_port_id(to_port) + out_port_addition], \
                            None,[from_cs.get_port_id(from_port)], \
                            None, None, "", [])
            tf.add_link_rule(rule)
        tf.save_as_json("%s/topology.tf.json" % settings["output_path"])
        tf.save_object_to_file("%s/topology.tf" % settings["output_path"])

    en = time()
    print "completed in ", en - st, "seconds"
def get_end_ports(name_to_id, index):
    """Return the output-port IDs of every port that is not an
    inter-router link, i.e. the network's edge ("end") ports.

    name_to_id -- map rtr_name -> {port_name: port_id}; replica router
        names carry the *index* suffix.
    index -- replica suffix appended to each base router name ("" or
        "1", "2", ...).
    """
    # Ports wired to other routers; a set gives O(1) membership tests
    # instead of the original O(n) list scan per port.
    linked_ports = {
        ("bbra_rtr", "te7/3"),
        ("bbra_rtr", "te7/2"),
        ("bbra_rtr", "te7/1"),
        ("bbra_rtr", "te1/3"),
        ("bbra_rtr", "te1/4"),
        ("bbra_rtr", "te6/1"),
        ("bbra_rtr", "te6/3"),
        ("bbrb_rtr", "te7/1"),
        ("bbrb_rtr", "te7/2"),
        ("bbrb_rtr", "te7/4"),
        ("bbrb_rtr", "te6/3"),
        ("bbrb_rtr", "te6/1"),
        ("bbrb_rtr", "te1/1"),
        ("bbrb_rtr", "te1/3"),
        ("boza_rtr", "te2/1"),
        ("boza_rtr", "te3/1"),
        ("boza_rtr", "te2/3"),
        ("bozb_rtr", "te2/3"),
        ("bozb_rtr", "te2/1"),
        ("bozb_rtr", "te3/1"),
        ("coza_rtr", "te3/1"),
        ("coza_rtr", "te2/1"),
        ("coza_rtr", "te2/3"),
        ("cozb_rtr", "te2/3"),
        ("cozb_rtr", "te2/1"),
        ("cozb_rtr", "te3/1"),
        ("goza_rtr", "te2/1"),
        ("goza_rtr", "te3/1"),
        ("goza_rtr", "te2/3"),
        ("gozb_rtr", "te2/3"),
        ("gozb_rtr", "te2/1"),
        ("gozb_rtr", "te3/1"),
        ("poza_rtr", "te2/1"),
        ("poza_rtr", "te3/1"),
        ("poza_rtr", "te2/3"),
        ("pozb_rtr", "te2/3"),
        ("pozb_rtr", "te2/1"),
        ("pozb_rtr", "te3/1"),
        ("roza_rtr", "te3/1"),
        ("roza_rtr", "te2/1"),
        ("roza_rtr", "te2/3"),
        ("rozb_rtr", "te2/3"),
        ("rozb_rtr", "te2/1"),
        ("rozb_rtr", "te3/1"),
        ("soza_rtr", "te2/1"),
        ("soza_rtr", "te3/1"),
        ("soza_rtr", "te2/3"),
        ("sozb_rtr", "te2/3"),
        ("sozb_rtr", "te3/1"),
        ("sozb_rtr", "te2/1"),
        ("yoza_rtr", "te7/1"),
        ("yoza_rtr", "te1/3"),
        ("yoza_rtr", "te1/1"),
        ("yoza_rtr", "te1/2"),
        ("yozb_rtr", "te1/2"),
        ("yozb_rtr", "te1/3"),
        ("yozb_rtr", "te2/1"),
        ("yozb_rtr", "te1/1"),
    }

    end_ports = []
    cs = cisco_router(1)
    # Hoist the loop-invariant output-port offset.
    out_offset = cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST
    for rtr_name in rtr_names:
        mod_rtr_name = "%s%s" % (rtr_name, index)
        for rtr_port in name_to_id[mod_rtr_name]:
            if (rtr_name, rtr_port) not in linked_ports:
                # Record the OUTPUT-port id of the edge port.
                end_ports.append(name_to_id[mod_rtr_name][rtr_port] + out_offset)

    return end_ports
# ---- Beispiel #13 (scrape artifact: example separator) ----
def main():
    """Generate test packets for the Stanford backbone.

    Builds an SQLite database of network/topology rules, runs
    reachability from sampled source ports in a multiprocessing pool,
    then iteratively compresses the resulting rule lists globally.

    Flags: -p percentage of test terminals to sample, -f database
    filename under results/, -e restrict sources to edge ports only.
    """
    global src_port_ids_global
    global dst_port_ids_global
    global port_map_global
    global port_reverse_map_global
    global ntf_global
    global ttf_global
    global DATABASE_FILE

    parser = ArgumentParser(description="Generate Test Packets for stanford")
    parser.add_argument("-p",
                        dest="percentage",
                        type=int,
                        default="100",
                        help="Percentage of test terminals")
    parser.add_argument("-f",
                        dest="filename",
                        default="stanford.sqlite",
                        help="Filename of the database")
    parser.add_argument("-e",
                        action="store_true",
                        default=False,
                        help="Edge port only")
    args = parser.parse_args()

    DATABASE_FILE = "results/%s" % args.filename

    cs = cisco_router(1)
    # Offset converting an input-port id to its output-port id.
    output_port_addition = cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST

    # Load .tf files
    ntf_global = load_stanford_backbone_ntf()
    ttf_global = load_stanford_backbone_ttf()
    (port_map_global,
     port_reverse_map_global) = load_stanford_backbone_port_to_id_map()

    # Initialize the database
    if os.access(DATABASE_FILE, os.F_OK):
        os.remove(DATABASE_FILE)

    conn = sqlite3.connect(DATABASE_FILE)
    conn.execute(
        'CREATE TABLE %s (rule TEXT, input_port TEXT, output_port TEXT, action TEXT, file TEXT, line TEXT)'
        % TABLE_NETWORK_RULES)
    conn.execute(
        'CREATE TABLE %s (rule TEXT, input_port TEXT, output_port TEXT)' %
        TABLE_TOPOLOGY_RULES)
    conn.execute(
        'CREATE TABLE %s (header TEXT, input_port INTEGER, output_port INTEGER, ports TEXT, no_of_ports INTEGER, rules TEXT, no_of_rules INTEGER)'
        % TABLE_TEST_PACKETS)
    conn.execute(
        'CREATE TABLE %s (header TEXT, input_port INTEGER, output_port INTEGER, ports TEXT, no_of_ports INTEGER, rules TEXT, no_of_rules INTEGER)'
        % TABLE_TEST_PACKETS_LOCALLY_COMPRESSED)
    conn.execute('CREATE TABLE %s (rules TEXT, no_of_rules INTEGER)' %
                 TABLE_TEST_PACKETS_GLOBALLY_COMPRESSED)
    conn.execute('CREATE TABLE %s (rule TEXT)' % TABLE_RESULT_RULES)

    # Dump every network-transfer-function rule into the database.
    rule_count = 0
    for tf in ntf_global.tf_list:
        rule_count += len(tf.rules)
        for rule in tf.rules:
            query = "INSERT INTO %s VALUES (?, ?, ?, ?, ?, ?)" % TABLE_NETWORK_RULES
            conn.execute(
                query,
                (rule['id'], ' '.join(map(str, rule['in_ports'])), ' '.join(
                    map(str, rule['out_ports'])), rule['action'], rule["file"],
                 ' '.join(map(str, rule["line"]))))
    print "Total Rules: %d" % rule_count
    conn.commit()

    # Dump topology (link) rules.
    rule_count = len(ttf_global.rules)
    for rule in ttf_global.rules:
        query = "INSERT INTO %s VALUES (?, ?, ?)" % TABLE_TOPOLOGY_RULES
        conn.execute(query, (rule['id'], ' '.join(map(
            str, rule['in_ports'])), ' '.join(map(str, rule['out_ports']))))
    print "Total Links: %d" % rule_count

    # Generate all ports
    for rtr in port_map_global.keys():
        src_port_ids_global |= set(port_map_global[rtr].values())

    total_length = len(src_port_ids_global)
    if args.e == True:
        # NOTE(review): get_end_ports is defined elsewhere in this file
        # with two parameters (name_to_id, index) -- confirm a
        # zero-argument variant exists in the deployed module.
        src_port_ids_global = get_end_ports()

    # Integer division under Python 2: samples floor(p%) of the ports.
    new_length = len(src_port_ids_global) * args.percentage / 100
    src_port_ids_global = random.sample(src_port_ids_global, new_length)
    print "Total Length: %d" % total_length
    print "New Length: %d" % new_length

    print src_port_ids_global

    # Destination set = output-port ids of the sampled sources.
    for port in src_port_ids_global:
        port += output_port_addition
        dst_port_ids_global.add(port)

    #src_port_ids_global = [300013]
    #dst_port_ids_global = [320010]

    conn.commit()
    conn.close()

    # Run reachability
    start_time = time.time()

    pool = Pool()
    result = pool.map_async(find_test_packets, src_port_ids_global)

    # Close
    pool.close()
    pool.join()

    end_time = time.time()

    test_packet_count = result.get()
    total_paths = sum(test_packet_count)
    print "========== Before Compression ========="
    print "Total Paths = %d" % total_paths
    print "Average packets per port = %f" % (float(total_paths) /
                                             len(src_port_ids_global))
    print "Total Time = %fs" % (end_time - start_time)

    #Global Compressing
    start_time = time.time()

    conn = sqlite3.connect(DATABASE_FILE, 6000)
    result_rule_lists = []
    query = "SELECT rules FROM %s" % TABLE_TEST_PACKETS_LOCALLY_COMPRESSED
    rows = conn.execute(query)

    for row in rows:
        result_rule_lists.append(row[0].split())
    conn.close()

    # Compress rule lists in parallel rounds until a round shrinks the
    # set by less than 1%.
    chunk_size = 80000
    while (True):
        print "Start a new round!"
        conn = sqlite3.connect(DATABASE_FILE, 6000)
        conn.execute('DROP TABLE IF EXISTS %s' % TABLE_SCRATCHPAD)
        conn.execute('CREATE TABLE %s (rules TEXT, no_of_rules INTEGER)' %
                     TABLE_SCRATCHPAD)
        conn.commit()
        conn.close()

        start_len = len(result_rule_lists)
        print start_len

        pool = Pool()
        no_of_chunks = len(result_rule_lists) / chunk_size + 1
        rule_list_chunks = chunks(result_rule_lists, no_of_chunks)
        result = pool.map_async(rule_lists_compress, rule_list_chunks)

        # Close
        pool.close()
        pool.join()
        result.get()

        print "End of this round."

        result_rule_lists = read_rule_lists_from_database(TABLE_SCRATCHPAD)

        end_len = len(result_rule_lists)
        if (float(end_len) / float(start_len) > 0.99):
            break

    end_time = time.time()

    query = "INSERT INTO %s VALUES (?, ?)" % TABLE_TEST_PACKETS_GLOBALLY_COMPRESSED
    query2 = "INSERT INTO %s VALUES (?)" % TABLE_RESULT_RULES

    total_paths = len(result_rule_lists)
    total_length = 0

    conn = sqlite3.connect(DATABASE_FILE, 6000)
    conn.execute('DROP TABLE IF EXISTS %s' %
                 TABLE_TEST_PACKETS_GLOBALLY_COMPRESSED)
    conn.execute('CREATE TABLE %s (rules TEXT, no_of_rules INTEGER)' %
                 TABLE_TEST_PACKETS_GLOBALLY_COMPRESSED)

    for rule_list in result_rule_lists:
        total_length += len(rule_list)
        conn.execute(query, (" ".join(rule_list), len(rule_list)))
        for rule in rule_list:
            conn.execute(query2, (rule, ))

    conn.commit()
    conn.close()

    print "========== After Compression ========="
    print "Total Paths = %d" % total_paths
    print "Average packets per port = %f" % (float(total_paths) /
                                             len(src_port_ids_global))
    print "Average length of rule list = %f" % (float(total_length) /
                                                total_paths)
    print "Total Time = %fs" % (end_time - start_time)
def get_end_ports(name_to_id,index):
    """Return the output-port IDs of every port that is not an
    inter-router link, i.e. the network's edge ("end") ports.

    name_to_id -- map rtr_name -> {port_name: port_id}; replica router
        names carry the *index* suffix.
    index -- replica suffix appended to each base router name ("" or
        "1", "2", ...).
    """
    # Ports wired to other routers; a set gives O(1) membership tests
    # instead of the original O(n) list scan per port.
    linked_ports = {
        ("bbra_rtr", "te7/3"),
        ("bbra_rtr", "te7/2"),
        ("bbra_rtr", "te7/1"),
        ("bbra_rtr", "te1/3"),
        ("bbra_rtr", "te1/4"),
        ("bbra_rtr", "te6/1"),
        ("bbra_rtr", "te6/3"),
        ("bbrb_rtr", "te7/1"),
        ("bbrb_rtr", "te7/2"),
        ("bbrb_rtr", "te7/4"),
        ("bbrb_rtr", "te6/3"),
        ("bbrb_rtr", "te6/1"),
        ("bbrb_rtr", "te1/1"),
        ("bbrb_rtr", "te1/3"),
        ("boza_rtr", "te2/1"),
        ("boza_rtr", "te3/1"),
        ("boza_rtr", "te2/3"),
        ("bozb_rtr", "te2/3"),
        ("bozb_rtr", "te2/1"),
        ("bozb_rtr", "te3/1"),
        ("coza_rtr", "te3/1"),
        ("coza_rtr", "te2/1"),
        ("coza_rtr", "te2/3"),
        ("cozb_rtr", "te2/3"),
        ("cozb_rtr", "te2/1"),
        ("cozb_rtr", "te3/1"),
        ("goza_rtr", "te2/1"),
        ("goza_rtr", "te3/1"),
        ("goza_rtr", "te2/3"),
        ("gozb_rtr", "te2/3"),
        ("gozb_rtr", "te2/1"),
        ("gozb_rtr", "te3/1"),
        ("poza_rtr", "te2/1"),
        ("poza_rtr", "te3/1"),
        ("poza_rtr", "te2/3"),
        ("pozb_rtr", "te2/3"),
        ("pozb_rtr", "te2/1"),
        ("pozb_rtr", "te3/1"),
        ("roza_rtr", "te3/1"),
        ("roza_rtr", "te2/1"),
        ("roza_rtr", "te2/3"),
        ("rozb_rtr", "te2/3"),
        ("rozb_rtr", "te2/1"),
        ("rozb_rtr", "te3/1"),
        ("soza_rtr", "te2/1"),
        ("soza_rtr", "te3/1"),
        ("soza_rtr", "te2/3"),
        ("sozb_rtr", "te2/3"),
        ("sozb_rtr", "te3/1"),
        ("sozb_rtr", "te2/1"),
        ("yoza_rtr", "te7/1"),
        ("yoza_rtr", "te1/3"),
        ("yoza_rtr", "te1/1"),
        ("yoza_rtr", "te1/2"),
        ("yozb_rtr", "te1/2"),
        ("yozb_rtr", "te1/3"),
        ("yozb_rtr", "te2/1"),
        ("yozb_rtr", "te1/1"),
    }

    end_ports = []
    cs = cisco_router(1)
    # Hoist the loop-invariant output-port offset.
    out_offset = cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST
    for rtr_name in rtr_names:
        mod_rtr_name = "%s%s" % (rtr_name, index)
        for rtr_port in name_to_id[mod_rtr_name]:
            if (rtr_name, rtr_port) not in linked_ports:
                # Record the OUTPUT-port id of the edge port.
                end_ports.append(name_to_id[mod_rtr_name][rtr_port] + out_offset)

    return end_ports
# ---- Beispiel #15 (scrape artifact: example separator) ----
def generate_transfer_functions(settings):
  """Build per-router transfer functions (and optionally a topology TF)
  from Cisco config/table dumps described by *settings*.

  settings keys: "rtr_names", "input_path", "output_path"; optional
  "replace_vlans", "hs_format", "*_file_sfx" suffix overrides,
  "optimize_fwd_table", "fwd_table_only", "topology".
  Writes <rtr>.tf / <rtr>.tf.json, port_map.json and (if requested)
  topology.tf[.json] under settings["output_path"].
  NOTE(review): unlike the 4-space-indent variant of this function,
  this version never creates output_path -- it is assumed to exist.
  """
  st = time()

  if ("replace_vlans" in settings.keys()):
    has_replaced_vlan = True
  else:
    has_replaced_vlan = False

  # Input file-name suffixes may be overridden per data set.
  if "arp_table_file_sfx" in settings.keys():
    arp_sfx = settings["arp_table_file_sfx"]
  else:
    arp_sfx = "_arp_table.txt"
  if "mac_table_file_sfx" in settings.keys():
    mac_sfx = settings["mac_table_file_sfx"]
  else:
    mac_sfx = "_mac_table.txt"
  if "config_file_sfx" in settings.keys():
    config_sfx = settings["config_file_sfx"]
  else:
    config_sfx = "_config.txt"
  if "spanning_tree_file_sfx" in settings.keys():
    span_sfx = settings["spanning_tree_file_sfx"]
  else:
    span_sfx = "_spanning_tree.txt"
  if "route_table_file_sfx" in settings.keys():
    route_sfx = settings["route_table_file_sfx"]
  else:
    route_sfx = "_route.txt"

  # generate transfer functions
  L = 0  # header-space length, taken from the last parsed router
  id = 1
  cs_list = {}
  for i in range(len(settings["rtr_names"])):
    rtr_name = settings["rtr_names"][i]
    cs = cisco_router(id)
    if has_replaced_vlan:
      cs.set_replaced_vlan(settings["replace_vlans"][i])
    if "hs_format" in settings.keys():
      cs.set_hs_format(settings["hs_format"])
    L = cs.hs_format["length"]
    tf = TF(L)
    tf.set_prefix_id(rtr_name)
    # Parse the router's dump files in dependency order.
    cs.read_arp_table_file("%s/%s%s"%(settings["input_path"],rtr_name,arp_sfx))
    cs.read_mac_table_file("%s/%s%s"%(settings["input_path"],rtr_name,mac_sfx))
    cs.read_spanning_tree_file("%s/%s%s"%\
                               (settings["input_path"],rtr_name,span_sfx))
    cs.read_config_file("%s/%s%s"%(settings["input_path"],rtr_name,config_sfx))
    cs.read_route_file("%s/%s%s"%(settings["input_path"],rtr_name,route_sfx))
    if ("optimize_fwd_table" not in settings.keys() or \
        settings["optimize_fwd_table"]):
      cs.optimize_forwarding_table()
    if ("fwd_table_only" in settings.keys() and settings["fwd_table_only"]):
      cs.generate_port_ids_only_for_output_ports()
      cs.generate_fwd_table_tf(tf)
    else:
      cs.generate_port_ids([])
      cs.generate_transfer_function(tf)
    tf.save_as_json("%s/%s.tf.json"%(settings["output_path"],rtr_name))
    tf.save_object_to_file("%s/%s.tf"%(settings["output_path"],rtr_name))
    id += 1
    cs_list[rtr_name] = cs

  #generate port maps
  f = open("%s/port_map.json"%settings["output_path"],'w')
  port_map = {}
  for rtr in cs_list.keys():
    cs = cs_list[rtr]
    port_map[rtr] = cs.port_to_id
  f.write(json.dumps(port_map))
  f.close()

  #write topology:
  if "topology" in settings.keys():
    print "===Generating Topology==="
    out_port_addition = cisco_router.PORT_TYPE_MULTIPLIER * \
          cisco_router.OUTPUT_PORT_TYPE_CONST
    topology = settings["topology"]
    tf = TF(L)
    # Each physical link becomes two unidirectional link rules.
    for (from_router,from_port,to_router,to_port) in topology:
        from_cs = cs_list[from_router]
        to_cs = cs_list[to_router]
        rule = TF.create_standard_rule(\
                      [from_cs.get_port_id(from_port) + out_port_addition],\
                        None,[to_cs.get_port_id(to_port)],\
                        None, None, "", [])
        tf.add_link_rule(rule)
        rule = TF.create_standard_rule(\
                      [to_cs.get_port_id(to_port) + out_port_addition], \
                        None,[from_cs.get_port_id(from_port)], \
                        None, None, "", [])
        tf.add_link_rule(rule)
    tf.save_as_json("%s/topology.tf.json"%settings["output_path"])
    tf.save_object_to_file("%s/topology.tf"%settings["output_path"])

  en = time()
  print "completed in ",en - st, "seconds"
    a special exception, as described in included LICENSE_EXCEPTION.txt.
    
Created on Sep 26, 2011

@author: Peyman Kazemian
'''
from headerspace.hs import *
from headerspace.tf import *
from headerspace.slice import *
from config_parser.cisco_router_parser import cisco_router
from config_parser.helper import *
from time import time
import math
import random

cs = cisco_router(1)
rtr_port_const = 100

rtr_ids =   {"bbra_rtr":100,
             "bbrb_rtr":200,
             "boza_rtr":300,
             "bozb_rtr":400,
             "coza_rtr":500,
             "cozb_rtr":600,
             "goza_rtr":700,
             "gozb_rtr":800,
             "poza_rtr":900,
             "pozb_rtr":1000,
             "roza_rtr":1100,
             "rozb_rtr":1200,
             "soza_rtr":1300,