def load_augmented_tf_to_nusmv(replication_factor, dir_path):
    '''
    For Model Checking Project.
    Creates NuSMV file from transfer function objects of replicated
    Stanford network.
    '''
    nusmv = NuSMV()
    cs = ciscoRouter(1)
    nusmv.set_output_port_offset(cs.PORT_TYPE_MULTIPLIER *
                                 cs.OUTPUT_PORT_TYPE_CONST)
    (port_map, port_reverse_map) = load_port_to_id_map(dir_path)
    end_ports = []
    for replicate in range(1, replication_factor + 1):
        for rtr_name in rtr_names:
            f = TF(1)
            f.load_object_from_file("%s/%s%d.tf" % (dir_path, rtr_name,
                                                    replicate))
            nusmv.generate_nusmv_trans(f, [])
        end_ports_subset = get_end_ports(port_map, "%d" % replicate)
        end_ports.extend(end_ports_subset)
    f = TF(1)
    f.load_object_from_file("%s/root.tf" % dir_path)
    nusmv.generate_nusmv_trans(f, [])
    f = TF(1)
    f.load_object_from_file("%s/backbone_topology.tf" % dir_path)
    nusmv.generate_nusmv_trans(f, end_ports)
    nusmv.generate_nusmv_input()
    return nusmv
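# Usage sketch (editor's addition): build the NuSMV model for a network
# replicated twice. The directory name follows the "<N>xtf_stanford_backbone"
# convention used by the reachability test later in this file; the exact path
# is an assumption, not a confirmed location.
#
#   nusmv = load_augmented_tf_to_nusmv(2, "work/2xtf_stanford_backbone")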
def print_paths_to_database(paths, reverse_map, table_name):
    '''
    Insert one row per discovered path into table_name: the input header,
    input/output ports, the visited-port string, and the applied rule IDs.
    '''
    # Timeout = 6000s
    insert_string = "INSERT INTO %s VALUES (?, ?, ?, ?, ?, ?, ?)" % table_name
    queries = []
    for p_node in paths:
        path_string = ""
        for port in p_node["visits"]:
            path_string += "%d " % port
        path_string += "%d " % p_node["port"]
        port_count = len(p_node["visits"]) + 1
        rl_id = ""
        for (n, r, s) in p_node["hdr"].applied_rule_ids:
            rl_id += r + " "
        rule_count = len(p_node["hdr"].applied_rule_ids)
        input_port = p_node["visits"][0]
        output_port = p_node["port"]
        output_hs = p_node["hdr"].copy()
        applied_rule_ids = list(output_hs.applied_rule_ids)
        # Recover the header as it looked at the input port by tracing the
        # output header space back through the applied rules.
        input_hs = trace_hs_back(applied_rule_ids, output_hs, output_port)[0]
        header_string = json.dumps(
            parse_hs(ciscoRouter(1).hs_format, input_hs.hs_list[0]))
        #header_string = byte_array_to_pretty_hs_string(input_hs.hs_list[0])
        queries.append((header_string, input_port, output_port, path_string,
                        port_count, rl_id, rule_count))
    conn = sqlite3.connect(DATABASE_FILE, 6000)
    for query in queries:
        conn.execute(insert_string, query)
    conn.commit()
    conn.close()
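# Note (editor's addition): the seven "?" placeholders above line up with the
# seven-column test-packet schema created in main() below:
#   (header TEXT, input_port INTEGER, output_port INTEGER, ports TEXT,
#    no_of_ports INTEGER, rules TEXT, no_of_rules INTEGER)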
def load_port_to_id_map(path):
    '''
    Load the map from box-port name to port ID, plus the reverse map from
    port ID to "router-port" name.
    '''
    id_to_name = {}
    name_to_id = {}
    rtr = ""
    cs = ciscoRouter(1)
    with open("%s/port_map.txt" % path, 'r') as f:
        for line in f:
            if line.startswith("$"):
                rtr = line[1:].strip()
                name_to_id[rtr] = {}
            elif line.strip() != "":
                tokens = line.strip().split(":")
                name_to_id[rtr][tokens[0]] = int(tokens[1])
                id_to_name[tokens[1]] = "%s-%s" % (rtr, tokens[0])
                # Also map the corresponding output-port ID to the same name.
                out_port = (int(tokens[1]) +
                            cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST)
                id_to_name["%s" % out_port] = "%s-%s" % (rtr, tokens[0])
    return (name_to_id, id_to_name)
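# Expected port_map.txt layout (editor's addition, reconstructed from the
# parser above; the concrete IDs are illustrative assumptions):
#
#   $bbra_rtr
#   te7/1:100001
#   te7/2:100002
#   $bbrb_rtr
#   te7/1:200001
#
# "$<router>" starts a section; each following "name:id" line maps a port
# name on that router to its numeric port ID.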
def load_tf_to_nusmv():
    '''
    For Model Checking Project.
    Creates NuSMV file from transfer function objects of Stanford network.
    '''
    nusmv = NuSMV()
    cs = ciscoRouter(1)
    nusmv.set_output_port_offset(cs.PORT_TYPE_MULTIPLIER *
                                 cs.OUTPUT_PORT_TYPE_CONST)
    for rtr_name in rtr_names:
        f = TF(1)
        f.load_object_from_file("work/tf_stanford_backbone/%s.tf" % rtr_name)
        nusmv.generate_nusmv_trans(f, [])
    (port_map, port_reverse_map) = load_stanford_backbone_port_to_id_map()
    end_ports = get_end_ports(port_map, "")
    f = TF(1)
    f.load_object_from_file("work/tf_stanford_backbone/backbone_topology.tf")
    nusmv.generate_nusmv_trans(f, end_ports)
    nusmv.generate_nusmv_input()
    return nusmv
def get_end_ports(name_to_id, index):
    # Ports wired to another backbone box; everything else is an end port.
    linked_ports = [
        ("bbra_rtr", "te7/3"), ("bbra_rtr", "te7/2"), ("bbra_rtr", "te7/1"),
        ("bbra_rtr", "te1/3"), ("bbra_rtr", "te1/4"), ("bbra_rtr", "te6/1"),
        ("bbra_rtr", "te6/3"),
        ("bbrb_rtr", "te7/1"), ("bbrb_rtr", "te7/2"), ("bbrb_rtr", "te7/4"),
        ("bbrb_rtr", "te6/3"), ("bbrb_rtr", "te6/1"), ("bbrb_rtr", "te1/1"),
        ("bbrb_rtr", "te1/3"),
        ("boza_rtr", "te2/1"), ("boza_rtr", "te3/1"), ("boza_rtr", "te2/3"),
        ("bozb_rtr", "te2/3"), ("bozb_rtr", "te2/1"), ("bozb_rtr", "te3/1"),
        ("coza_rtr", "te3/1"), ("coza_rtr", "te2/1"), ("coza_rtr", "te2/3"),
        ("cozb_rtr", "te2/3"), ("cozb_rtr", "te2/1"), ("cozb_rtr", "te3/1"),
        ("goza_rtr", "te2/1"), ("goza_rtr", "te3/1"), ("goza_rtr", "te2/3"),
        ("gozb_rtr", "te2/3"), ("gozb_rtr", "te2/1"), ("gozb_rtr", "te3/1"),
        ("poza_rtr", "te2/1"), ("poza_rtr", "te3/1"), ("poza_rtr", "te2/3"),
        ("pozb_rtr", "te2/3"), ("pozb_rtr", "te2/1"), ("pozb_rtr", "te3/1"),
        ("roza_rtr", "te3/1"), ("roza_rtr", "te2/1"), ("roza_rtr", "te2/3"),
        ("rozb_rtr", "te2/3"), ("rozb_rtr", "te2/1"), ("rozb_rtr", "te3/1"),
        ("soza_rtr", "te2/1"), ("soza_rtr", "te3/1"), ("soza_rtr", "te2/3"),
        ("sozb_rtr", "te2/3"), ("sozb_rtr", "te3/1"), ("sozb_rtr", "te2/1"),
        ("yoza_rtr", "te7/1"), ("yoza_rtr", "te1/3"), ("yoza_rtr", "te1/1"),
        ("yoza_rtr", "te1/2"),
        ("yozb_rtr", "te1/2"), ("yozb_rtr", "te1/3"), ("yozb_rtr", "te2/1"),
        ("yozb_rtr", "te1/1"),
    ]
    end_ports = []
    cs = ciscoRouter(1)
    for rtr_name in rtr_names:
        mod_rtr_name = "%s%s" % (rtr_name, index)
        for rtr_port in name_to_id[mod_rtr_name]:
            if (rtr_name, rtr_port) not in linked_ports:
                end_ports.append(name_to_id[mod_rtr_name][rtr_port] +
                                 cs.PORT_TYPE_MULTIPLIER *
                                 cs.OUTPUT_PORT_TYPE_CONST)
    return end_ports
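# Usage sketch (editor's addition): collect the end (non-backbone) output
# ports of the first replica. The "1" suffix matches the replica naming used
# by load_augmented_tf_to_nusmv above; the directory path is an assumption.
#
#   (port_map, id_to_name) = load_port_to_id_map("work/2xtf_stanford_backbone")
#   edge_ports = get_end_ports(port_map, "1")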
'''
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.

Created on Aug 14, 2011

@author: Peyman Kazemian
'''
from utils.load_stanford_backbone import *
from config_parser.cisco_router_parser import ciscoRouter
from headerspace.hs import *
from headerspace.applications import *
from time import time, clock

(ntf, ttf, port_map, port_reverse_map) = load_replicated_stanford_network(
    16, "16xtf_stanford_backbone")
cs = ciscoRouter(1)
output_port_addition = cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST
#add_internet(ntf,ttf,port_map,cs,[("171.64.0.0",14),("128.12.0.0",16)])

all_x = byte_array_get_all_x(ntf.length)
#cs.set_field(all_x, "ip_dst", dotted_ip_to_int("172.0.0.0"), 21)
#cs.set_field(all_x, "vlan", 92, 0)
test_pkt = headerspace(ntf.length)
test_pkt.add_hs(all_x)

src_port_id = port_map["bbra_rtr1"]["te6/3"]
dst_port_ids = [port_map["roza_rtr10"]["te3/3"] + output_port_addition]

st = time()
paths = find_reachability(ntf, ttf, src_port_id, dst_port_ids, test_pkt)
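# Editor's sketch: report the result of the reachability query above.
# Each element of `paths` carries "visits" (ports traversed) and "port"
# (egress port), as consumed by print_paths_to_database earlier in this file.
en = time()
print "Found %d paths in %f seconds" % (len(paths), en - st)
for p_node in paths:
    print "path: %s -> %d" % (" ".join(str(p) for p in p_node["visits"]),
                              p_node["port"])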
""" <Run loop detection test on Stanford network> Created on Aug 14, 2011 @author: Peyman Kazemian """ from utils.load_stanford_backbone import * from config_parser.cisco_router_parser import ciscoRouter from headerspace.hs import * from headerspace.applications import * from time import time, clock ntf = load_stanford_backbone_ntf() ttf = load_stanford_backbone_ttf() (port_map, port_reverse_map) = load_stanford_backbone_port_to_id_map() cs = ciscoRouter(1) output_port_addition = cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST # add_internet(ntf,ttf,port_map,cs,[("171.64.0.0",14),("128.12.0.0",16)]) all_x = byte_array_get_all_x(ntf.length) # cs.set_field(all_x, "ip_dst", dotted_ip_to_int("172.0.0.0"), 21) # cs.set_field(all_x, "vlan", 92, 0) test_pkt = headerspace(ntf.length) test_pkt.add_hs(all_x) loop_port_ids = [ port_map["bbra_rtr"]["te7/1"], port_map["bbrb_rtr"]["te7/1"], port_map["bbra_rtr"]["te6/3"], port_map["bbrb_rtr"]["te7/4"],
def main():
    global src_port_ids_global
    global dst_port_ids_global
    global port_map_global
    global port_reverse_map_global
    global ntf_global
    global ttf_global
    global DATABASE_FILE

    parser = ArgumentParser(description="Generate Test Packets for stanford")
    parser.add_argument("-p", dest="percentage", type=int, default=100,
                        help="Percentage of test terminals")
    parser.add_argument("-f", dest="filename", default="stanford.sqlite",
                        help="Filename of the database")
    parser.add_argument("-e", action="store_true", default=False,
                        help="Edge ports only")
    parser.add_argument("--folder", dest="work_folder",
                        help="Where to look for transfer functions")
    args = parser.parse_args()
    DATABASE_FILE = "work/%s" % args.filename

    cs = ciscoRouter(1)
    output_port_addition = cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST

    # Load .tf files
    ntf_global = load_stanford_backbone_ntf()
    ttf_global = load_stanford_backbone_ttf()
    (port_map_global, port_reverse_map_global) = \
        load_stanford_backbone_port_to_id_map()

    # Initialize the database
    if os.access(DATABASE_FILE, os.F_OK):
        os.remove(DATABASE_FILE)
    conn = sqlite3.connect(DATABASE_FILE)
    conn.execute('CREATE TABLE %s (rule TEXT, input_port TEXT, '
                 'output_port TEXT, action TEXT, file TEXT, line TEXT)'
                 % TABLE_NETWORK_RULES)
    conn.execute('CREATE TABLE %s (rule TEXT, input_port TEXT, '
                 'output_port TEXT)' % TABLE_TOPOLOGY_RULES)
    conn.execute('CREATE TABLE %s (header TEXT, input_port INTEGER, '
                 'output_port INTEGER, ports TEXT, no_of_ports INTEGER, '
                 'rules TEXT, no_of_rules INTEGER)' % TABLE_TEST_PACKETS)
    conn.execute('CREATE TABLE %s (header TEXT, input_port INTEGER, '
                 'output_port INTEGER, ports TEXT, no_of_ports INTEGER, '
                 'rules TEXT, no_of_rules INTEGER)'
                 % TABLE_TEST_PACKETS_LOCALLY_COMPRESSED)
    conn.execute('CREATE TABLE %s (rules TEXT, no_of_rules INTEGER)'
                 % TABLE_TEST_PACKETS_GLOBALLY_COMPRESSED)
    conn.execute('CREATE TABLE %s (rule TEXT)' % TABLE_RESULT_RULES)

    rule_count = 0
    for tf in ntf_global.tf_list:
        rule_count += len(tf.rules)
        for rule in tf.rules:
            query = ("INSERT INTO %s VALUES (?, ?, ?, ?, ?, ?)"
                     % TABLE_NETWORK_RULES)
            conn.execute(query, (rule['id'],
                                 ' '.join(map(str, rule['in_ports'])),
                                 ' '.join(map(str, rule['out_ports'])),
                                 rule['action'], rule["file"],
                                 ' '.join(map(str, rule["line"]))))
    print "Total Rules: %d" % rule_count
    conn.commit()

    rule_count = len(ttf_global.rules)
    for rule in ttf_global.rules:
        query = "INSERT INTO %s VALUES (?, ?, ?)" % TABLE_TOPOLOGY_RULES
        conn.execute(query, (rule['id'],
                             ' '.join(map(str, rule['in_ports'])),
                             ' '.join(map(str, rule['out_ports']))))
    print "Total Links: %d" % rule_count

    # Generate all ports
    for rtr in port_map_global.keys():
        src_port_ids_global |= set(port_map_global[rtr].values())
    total_length = len(src_port_ids_global)
    if args.e:
        src_port_ids_global = get_end_ports()
    new_length = len(src_port_ids_global) * args.percentage / 100
    src_port_ids_global = random.sample(src_port_ids_global, new_length)
    print "Total Length: %d" % total_length
    print "New Length: %d" % new_length
    print src_port_ids_global

    for port in src_port_ids_global:
        dst_port_ids_global.add(port + output_port_addition)

    #src_port_ids_global = [300013]
    #dst_port_ids_global = [320010]
    conn.commit()
    conn.close()

    # Run reachability
    start_time = time.time()
    pool = Pool()
    result = pool.map_async(find_test_packets, src_port_ids_global)
    pool.close()
    pool.join()
    end_time = time.time()

    test_packet_count = result.get()
    total_paths = sum(test_packet_count)
    print "========== Before Compression ========="
    print "Total Paths = %d" % total_paths
    print "Average packets per port = %f" % (float(total_paths) /
                                             len(src_port_ids_global))
    print "Total Time = %fs" % (end_time - start_time)

    # Global compression
    start_time = time.time()
    conn = sqlite3.connect(DATABASE_FILE, 6000)
    result_rule_lists = []
    query = "SELECT rules FROM %s" % TABLE_TEST_PACKETS_LOCALLY_COMPRESSED
    rows = conn.execute(query)
    for row in rows:
        result_rule_lists.append(row[0].split())
    conn.close()

    chunk_size = 80000
    while True:
        print "Start a new round!"
        conn = sqlite3.connect(DATABASE_FILE, 6000)
        conn.execute('DROP TABLE IF EXISTS %s' % TABLE_SCRATCHPAD)
        conn.execute('CREATE TABLE %s (rules TEXT, no_of_rules INTEGER)'
                     % TABLE_SCRATCHPAD)
        conn.commit()
        conn.close()

        start_len = len(result_rule_lists)
        print start_len
        pool = Pool()
        no_of_chunks = len(result_rule_lists) / chunk_size + 1
        rule_list_chunks = chunks(result_rule_lists, no_of_chunks)
        result = pool.map_async(rule_lists_compress, rule_list_chunks)
        pool.close()
        pool.join()
        result.get()
        print "End of this round."

        result_rule_lists = read_rule_lists_from_database(TABLE_SCRATCHPAD)
        end_len = len(result_rule_lists)
        # Stop once a round shrinks the rule lists by less than 1%.
        if float(end_len) / float(start_len) > 0.99:
            break
    end_time = time.time()

    query = ("INSERT INTO %s VALUES (?, ?)"
             % TABLE_TEST_PACKETS_GLOBALLY_COMPRESSED)
    query2 = "INSERT INTO %s VALUES (?)" % TABLE_RESULT_RULES
    total_paths = len(result_rule_lists)
    total_length = 0
    conn = sqlite3.connect(DATABASE_FILE, 6000)
    conn.execute('DROP TABLE IF EXISTS %s'
                 % TABLE_TEST_PACKETS_GLOBALLY_COMPRESSED)
    conn.execute('CREATE TABLE %s (rules TEXT, no_of_rules INTEGER)'
                 % TABLE_TEST_PACKETS_GLOBALLY_COMPRESSED)
    for rule_list in result_rule_lists:
        total_length += len(rule_list)
        conn.execute(query, (" ".join(rule_list), len(rule_list)))
        for rule in rule_list:
            conn.execute(query2, (rule,))
    conn.commit()
    conn.close()

    print "========== After Compression ========="
    print "Total Paths = %d" % total_paths
    print "Average packets per port = %f" % (float(total_paths) /
                                             len(src_port_ids_global))
    print "Average length of rule list = %f" % (float(total_length) /
                                                total_paths)
    print "Total Time = %fs" % (end_time - start_time)