コード例 #1
0
ファイル: Fig12.py プロジェクト: tjt7a/APSim
def process_single_ds(uat):
    try:

        uat_count = 200
        # number of total automata to be processed. Make this number smaller to get results faster for a subset

        automatas = atma.parse_anml_file(anml_path[uat])
        automatas.remove_ors()
        automatas = automatas.get_connected_components_as_automatas()

        automatas = automatas[:uat_count]
        # pick the number of automataon to be processed. comment this line if you want to process the whole benchmark

        target_bit_widths = [1, 2, 4, 16]
        # bitwidth to be calculated

        stats = [[0, 0] for _ in range(len(target_bit_widths))]

        for atm in automatas:
            b_atm = atma.automata_network.get_bit_automaton(atm, original_bit_width=atm.max_val_dim_bits_len)
            # generate bit automaton

            for tb_idx, tb in enumerate(target_bit_widths):
                if tb == 1:
                    atm = b_atm.clone()
                else:
                    atm = atma.automata_network.get_strided_automata2(atm=b_atm,
                                                                  stride_value=tb,
                                                                  is_scalar=True,
                                                                  base_value=2,
                                                                  add_residual=True)

                if atm.is_homogeneous is False:
                    atm.make_homogenous()

                minimize_automata(atm)

                n_s = atm.nodes_count
                n_e = atm.edges_count

                stats[tb_idx][0] += n_s
                stats[tb_idx][1] += n_e

        global_lock.acquire()
        print uat
        for tb_idx, tb in enumerate(target_bit_widths):
            print "bitwidth = ", tb, "number of states = ", stats[tb_idx][0], "number of edges = ", stats[tb_idx][1]

        global_lock.release()
        return uat, stats
    except Exception, e:
        tracebackString = traceback.format_exc(e)
        print tracebackString
        raise StandardError, "\n\nError occurred. Original traceback is\n%s\n" %(tracebackString)
コード例 #2
0
ファイル: stride_16_lut.py プロジェクト: tjt7a/APSim
def process_single_ds(uat):
    # Build LUT-based HDL for one benchmark: each CC is double-strided,
    # homogenized, minimized and registered into a staged pipeline under
    # /home/gr5yf/FCCM_2020/lut16.
    # NOTE(review): 'stride' is a module-level global — confirm its value
    # matches the two get_single_stride_graph() passes below.
    automata_per_stage = 50  # pipelining parameter: automata per HDL stage
    exempts = {(AnmalZoo.Snort, 1411)}  # known-problematic (benchmark, CC) pairs to skip

    automatas = atma.parse_anml_file(anml_path[uat])
    automatas.remove_ors()
    automatas = automatas.get_connected_components_as_automatas()

    # process the whole benchmark; the slice is a no-op kept so a subset can
    # be selected easily by hard-coding uat_count
    uat_count = len(automatas)
    automatas = automatas[:uat_count]
    uat_count = len(automatas)

    # split the automata evenly over ceil(n / automata_per_stage) stages
    number_of_stages = math.ceil(len(automatas) / float(automata_per_stage))
    atms_per_stage = int(math.ceil(len(automatas) / float(number_of_stages)))

    hdl_folder_name = hd_gen.get_hdl_folder_name(prefix=str(uat), number_of_atms=uat_count,
                                                  stride_value=stride, before_match_reg=False,
                                                  after_match_reg=False, ste_type=1, use_bram=False,
                                                  use_compression=False, compression_depth=-1,
                                                  use_mid_fifo=False, use_rst=True)

    # total input width scales by 2**stride relative to the base symbol width
    generator_ins = hd_gen.HDL_Gen(path=os.path.join("/home/gr5yf/FCCM_2020/lut16", hdl_folder_name), before_match_reg=False,
                                           after_match_reg=False, ste_type=1,
                                           total_input_len=automatas[0].max_val_dim_bits_len * pow(2, stride),
                                           bram_shape=None)

    for atm_idx, atm in enumerate(automatas):
        if (uat, atm_idx) in exempts:
            continue
        print 'processing {0} stride {3} number {1} from {2}'.format(uat, atm_idx, uat_count, stride)

        # two striding passes: 2 symbols per cycle, then 4
        atm = atm.get_single_stride_graph()
        atm = atm.get_single_stride_graph()
        atm.make_homogenous()

        minimize_automata(atm)

        generator_ins.register_automata(atm=atm, use_compression=False, lut_bram_dic={})

        # close the current pipeline stage once it is full
        if (atm_idx + 1) % atms_per_stage == 0:
            generator_ins.register_stage_pending(use_bram=False)

    # flush any automata left in a partially-filled final stage
    generator_ins.register_stage_pending(use_bram=False)

    generator_ins.finilize(dataplane_intcon_max_degree=5, contplane_intcon_max_degree=10)
コード例 #3
0
def process_single_ds(uat):
    """Compare a benchmark's automata against value-approximated versions.

    Per-CC report counts and state counts, plus a final summary, are
    appended to '<uat>.txt'. Relies on module-level globals:
    approximate_ratio, uat_count, input_path, total_reports.
    """
    automatas = atma.parse_anml_file(anml_path[uat])
    approximate_final, real_final = [], []  # these lists keep number of reports for each CC
    real_states, appr_states = 0, 0  # these integers count number of states
    # map every symbol value to its residue mod approximate_ratio
    translation_dic = {x: x % approximate_ratio for x in range(automatas.max_val_dim + 1)}

    automatas.remove_ors()
    automatas = automatas.get_connected_components_as_automatas()
    # truncate the per-benchmark result file; everything below appends to it
    with open(str(uat) + '.txt', "w+") as f:
        pass
    for atm in automatas[:uat_count]:
        real_states += atm.nodes_count
        atm.set_all_symbols_mutation(False)

        appr_automata = get_approximate_automata(atm=atm, translation_dic=translation_dic,
                                                 max_val_dim=atm.max_val_dim / approximate_ratio)
        minimize_automata(automata=appr_automata)
        appr_states += appr_automata.nodes_count

        # run both the exact and the approximate automaton on the same input
        run_result = automata_run_stat(atm=atm, file_path=input_path[uat], cycle_detail=True, report_detail=False, bytes_per_dim=1)
        real_final.append(run_result[total_reports])
        appr_run_result = automata_run_stat(atm=appr_automata, file_path=input_path[uat], cycle_detail=True, report_detail=False, bytes_per_dim=1,
                                            translation_dic=translation_dic)
        approximate_final.append(appr_run_result[total_reports])

        with open(str(uat) + '.txt', "a") as f:
            print >> f, "real reports: " + str(real_final[-1])
            print >> f, "approximate reports: " + str(approximate_final[-1])
            print >> f, "real nodes count: " + str(atm.nodes_count)
            print >> f, "approximate nodes count:" + str(appr_automata.nodes_count)
            print >> f, "----------------------------------------------------------------"

    # BUG FIX: the summary was appended to '<uat>.ttxt' (filename typo) while
    # all per-CC results go to '<uat>.txt'; write the summary to the same file.
    with open(str(uat) + '.txt', "a") as f:
        print >> f, "***************sum*******************"
        print >> f, "real reports: " + str(sum(real_final))
        print >> f, "approximate reports: " + str(sum(approximate_final))
        print >> f, "real nodes count: " + str(real_states)
        print >> f, "approximate nodes count:" + str(appr_states)
コード例 #4
0
ファイル: Table2.py プロジェクト: tjt7a/APSim
def process_single_ds(uat):

    automatas = atma.parse_anml_file(anml_path[uat])
    automatas.remove_ors()
    automatas = automatas.get_connected_components_as_automatas()

    uat_count = len(automatas)

    n_states = 0.0
    n_edges = 0.0
    total_sym = 0.0

    for atm_idx, atm in enumerate(automatas):

        minimize_automata(atm)

        all_nodes = filter(lambda n: n.id != 0, atm.nodes)  # filter fake root
        all_nodes_symbols_len_count = [
            len(list(n.symbols.points)) for n in all_nodes
        ]

        n_s = atm.nodes_count
        n_states += n_s

        n_e = atm.edges_count
        n_edges += n_e

        t_s = sum(all_nodes_symbols_len_count)
        total_sym += t_s

    global_lock.acquire()
    print str(uat), "\t nodes count = ", n_states, "\tedges count = ", n_edges, "\tAvg node degree = ",\
        2 * n_edges / n_states, "\tAvg symbol size = ", total_sym/n_states

    global_lock.release()

    return True
コード例 #5
0
    # (fragment — enclosing def not visible) For every connected component,
    # record (node count, edge count, max STE out-degree, max STE in-degree)
    # for the original automaton and its 1-, 2- and 3-stride derivatives.
    orig_atm_nodes_count = []
    fst_stride_nodes_count = []
    sec_stride_nodes_count = []
    third_stride_nodes_count = []

    orig_automatas = automata.get_connected_components_as_automatas()

    for orig_cc in tqdm(orig_automatas, unit="automata"):
        orig_atm_nodes_count.append((orig_cc.get_number_of_nodes(), orig_cc.edges_count,
                                     orig_cc.max_STE_out_degree(),orig_cc.max_STE_in_degree() ))

        orig_cc.remove_all_start_nodes()

        # stride 1: two input symbols per cycle
        fst_st_atm = orig_cc.get_single_stride_graph()
        fst_st_atm.make_homogenous()
        minimize_automata(fst_st_atm)
        fst_stride_nodes_count.append((fst_st_atm.get_number_of_nodes(), fst_st_atm.edges_count,
                                       fst_st_atm.max_STE_out_degree(),fst_st_atm.max_STE_in_degree()))

        # stride 2: four input symbols per cycle
        sec_st_atm = fst_st_atm.get_single_stride_graph()
        sec_st_atm.make_homogenous()
        minimize_automata(sec_st_atm)
        sec_stride_nodes_count.append((sec_st_atm.get_number_of_nodes(), sec_st_atm.edges_count,
                                       sec_st_atm.max_STE_out_degree(),sec_st_atm.max_STE_in_degree()))

        # stride 3: eight input symbols per cycle
        thd_st_atm = sec_st_atm.get_single_stride_graph()
        thd_st_atm.make_homogenous()
        minimize_automata(thd_st_atm)
        third_stride_nodes_count.append((thd_st_atm.get_number_of_nodes(), thd_st_atm.edges_count,
                                         thd_st_atm.max_STE_out_degree(),thd_st_atm.max_STE_in_degree()))
コード例 #6
0
                # (fragment — cut at both edges) compute byte->symbol
                # equivalence classes when compression is enabled;
                # replace=True rewrites the automaton's symbols in place
                bc_sym_dict = get_equivalent_symbols([atm], replace=True)

            translation_list = []

            # three striding passes: the final automaton consumes eight of
            # the original symbols per cycle
            print atm.get_summary(logo="9bit atm info")
            atm = atm.get_single_stride_graph()
            print atm.get_summary(logo="done st1")
            atm = atm.get_single_stride_graph()
            print atm.get_summary(logo="done st2")
            atm = atm.get_single_stride_graph()
            print atm.get_summary(logo="done st3")

            if atm.is_homogeneous is False:
                atm.make_homogenous()

            minimize_automata(atm)

            print atm.get_summary()

            strided_automatas.append(atm.id)

            # register for HDL generation; compression metadata is only
            # passed along when compression is actually in use
            generator_ins.register_automata(atm=atm, use_compression=use_compression, byte_trans_map=bc_sym_dict if use_compression else None,
                                            translation_list=translation_list)
            if use_compression:
                generator_ins.register_compressor([atm.id], byte_trans_map=bc_sym_dict,
                                                  translation_list=translation_list)

            # close the current pipeline stage once it is full
            if (atm_idx + 1) % atms_per_stage == 0:
                generator_ins.register_stage_pending()

        # flush the final, possibly partially-filled stage
        generator_ins.register_stage_pending()
コード例 #7
0
    # (fragment — enclosing def not visible) Accumulate switch-connectivity
    # matrices for BFS-routed CCs; the range covers max_stride only.
    for stride in range(max_stride,
                        max_stride + 1):  # one more for the original automata
        acc_switch_map = np.zeros((switch_size, switch_size))

        print "starting stride ", stride
        for cc_idx, cc in enumerate(ccs[:]):
            print "processing {} , id {}".format(anml, cc_idx)
            print cc.get_summary(logo="original")

            # stride the CC 'stride' times
            for _ in range(stride):
                cc = cc.get_single_stride_graph()

            if cc.is_homogeneous is False:
                cc.make_homogenous()

            utility.minimize_automata(cc)
            cc.fix_split_all()

            print cc.get_summary()

            # place states with BFS against the routing template, then read
            # back which switches this placement uses
            bfs_cost, bfs_label_dictionary = cc.bfs_rout(routing_template)

            switch_map = cc.get_connectivity_matrix(
                node_dictionary=bfs_label_dictionary)

            acc_mat_size, _ = acc_switch_map.shape
            new_mat_size, _ = switch_map.shape

            # grow the accumulator when this CC needs a bigger matrix;
            # NOTE(review): the branch for new_mat_size <= acc_mat_size
            # appears truncated in this snippet — verify in the full file
            if new_mat_size > acc_mat_size:
                switch_map[:acc_mat_size, :acc_mat_size] += acc_switch_map
                acc_switch_map = switch_map
コード例 #8
0
ファイル: bram_based_8bit.py プロジェクト: tjt7a/APSim
def process_single_ds(uat):
    # Generate BRAM-based 8-bit HDL for one benchmark: every CC is minimized
    # (no striding) and registered into a staged pipeline under
    # /home/gr5yf/FCCM_2020/bram8.
    # Returns (uat, return_result); return_result stays empty here.
    # Relies on module-level globals: dbw, use_mid_fifo, use_rst.

    return_result = {}
    #result_dir = out_dir_prefix + str(uat)

    #shutil.rmtree(result_dir, ignore_errors=True)
    #os.mkdir(result_dir)
    # cleaning the result folder

    automata_per_stage = 100
    # this is a pipelineing parameter for staging as pipeline. We usually use 50 for this parameter

    automatas = atma.parse_anml_file(anml_path[uat])
    automatas.remove_ors()
    automatas = automatas.get_connected_components_as_automatas()

    #uat_count = 10  # number of automata to be processed
    uat_count = len(
        automatas
    )  # comment this to test a subset of automatons defined in uat_count
    automatas = automatas[:uat_count]
    uat_count = len(automatas)
    number_of_stages = int(
        math.ceil(len(automatas) / float(automata_per_stage)))
    # number of pipleine stages

    atms_per_stage = int(math.ceil(len(automatas) / float(number_of_stages)))

    hdl_folder_name = hd_gen.get_hdl_folder_name(prefix=str(uat),
                                                 number_of_atms=len(automatas),
                                                 stride_value=0,
                                                 before_match_reg=False,
                                                 after_match_reg=False,
                                                 ste_type=1,
                                                 use_bram=True,
                                                 use_compression=False,
                                                 compression_depth=-1,
                                                 use_mid_fifo=use_mid_fifo,
                                                 use_rst=use_rst)

    print "folder name to store the HDLs:", hdl_folder_name

    generator_ins = hd_gen.HDL_Gen(path=os.path.join(
        "/home/gr5yf/FCCM_2020/bram8", hdl_folder_name),
                                   before_match_reg=False,
                                   after_match_reg=False,
                                   ste_type=1,
                                   total_input_len=dbw,
                                   use_mid_fifo=use_mid_fifo,
                                   use_rst=use_rst,
                                   bram_shape=(512, 36))

    for atm_idx, atm in enumerate(automatas):
        print 'processing {0} number {1} from {2}'.format(
            uat, atm_idx + 1, uat_count)
        minimize_automata(atm)
        # value 2 per stride dimension for every real (non-fake) node —
        # presumably selects BRAM matching per dimension; confirm with HDL_Gen
        lut_bram_dic = {
            n: tuple((2 for _ in range(atm.stride_value)))
            for n in atm.nodes if n.is_fake is False
        }

        generator_ins.register_automata(atm=atm,
                                        use_compression=False,
                                        lut_bram_dic=lut_bram_dic)

        # close the current pipeline stage once it is full
        if (atm_idx + 1) % atms_per_stage == 0:
            generator_ins.register_stage_pending(use_bram=True)

    # flush the final, possibly partially-filled stage
    generator_ins.register_stage_pending(use_bram=True)

    generator_ins.finilize(dataplane_intcon_max_degree=5,
                           contplane_intcon_max_degree=10)

    return uat, return_result
コード例 #9
0
def process_anml(bitwidth, input_directory, automata_per_stage):
    # Convert every .anml file in input_directory into truth-table-based
    # symbolic HDL, staged every automata_per_stage automata, then pack the
    # generated folder into a .tar.gz archive and remove the working dir.
    # Relies on module-level globals: dbw, minimize.
    # NOTE(review): this function mixes Python-2 print statements (below)
    # with print(...) calls; under Python 2 the two-argument print(...)
    # calls emit a tuple repr — confirm the intended interpreter.

    # This is the directory name to be created for HDL files
    output_hdl_directory = input_directory + '/' + str(bitwidth) + '_' + str(
        automata_per_stage)

    anml_input_files = glob.glob(input_directory + '/*.anml')
    print("ANML Files: ", anml_input_files)

    # Clean up directory
    shutil.rmtree(output_hdl_directory, ignore_errors=True)
    os.mkdir(output_hdl_directory)

    SYMBOLIC = True

    # Create a directory name for the HDL code
    hdl_folder_name = hd_gen.get_hdl_folder_name(
        prefix=output_hdl_directory,
        number_of_atms=len(anml_input_files),
        stride_value=0,
        before_match_reg=False,
        after_match_reg=False,
        ste_type=1,
        use_bram=False,
        use_compression=False,
        compression_depth=-1,
        symbolic=SYMBOLIC)

    print("Folder name to store the HDLs: ", hdl_folder_name)

    # Create a hardware Generator
    generator_ins = hd_gen.HDL_Gen(path=hdl_folder_name,
                                   before_match_reg=False,
                                   after_match_reg=False,
                                   ste_type=1,
                                   total_input_len=dbw,
                                   symbolic=SYMBOLIC)

    # Iterate through the ANML files in the directory
    for index, anml_input_file in enumerate(anml_input_files):

        print("Parsing Automata: {}".format(anml_input_file))

        module_name = 'Automata_tt_' + str(index)
        verilog_filename = hdl_folder_name + '/automata_tt_' + str(
            index) + '.sv'

        # Parse the ANML file
        automata = atma.parse_anml_file(anml_input_file)

        # Minimizing the automata with NFA heuristics
        # ('minimize' is a module-level flag defined outside this view)
        if minimize:
            print("Minimizing Automata: {}".format(anml_input_file))
            minimize_automata(automata)
            #atma.generate_anml_file(anml_input_file + "_min.anml", automata)
        else:
            print("No minimization of Automata")

        # lower the automaton to truth tables and emit the SystemVerilog
        tables, start_states, accept_states = atma.generate_tt(automata)

        inputs = atma.build_truthtable(tables, start_states, accept_states,
                                       module_name, verilog_filename)

        print "Inputs: ", inputs

        # for now, we will use this automata proxy
        automata = Automatanetwork('tt_' + str(index),
                                   True,
                                   1,
                                   255,
                                   inputs=inputs)

        # single reporting placeholder state for the proxy automaton
        new_node = S_T_E(start_type=StartType.non_start,
                         is_report=True,
                         is_marked=False,
                         id=automata.get_new_id(),
                         symbol_set=None,
                         adjacent_S_T_E_s=None,
                         report_residual=1,
                         report_code=1)

        automata.add_element(new_node)

        print "Number of states: ", automata.nodes_count

        # Register this automaton
        generator_ins.register_automata(atm=automata, use_compression=False)

        # We've got another batch of automata_per_stage automata to stage
        if (index + 1) % automata_per_stage == 0:
            generator_ins.register_stage_pending(single_out=False,
                                                 use_bram=False)

    # DO we need this? maybe if our number of automata is not a perfect multiple
    # of automata_per_stage?
    generator_ins.register_stage_pending(single_out=False, use_bram=False)

    #Finalize and wrap up HDL in archive folder
    generator_ins.finilize()

    # Using gztar to handle LARGE automata workloads
    shutil.make_archive(hdl_folder_name, 'gztar', output_hdl_directory)
    shutil.rmtree(output_hdl_directory)
コード例 #10
0
# Compare traditional homogenization (use_espresso=False) against the
# espresso-based variant on one Snort connected component; every intermediate
# automaton is drawn to an SVG and final node/edge counts are printed.
uat = AnmalZoo.Snort

automatas = atma.parse_anml_file(anml_path[uat])
automatas.remove_ors()
automatas = automatas.get_connected_components_as_automatas()

# only CC #7 is examined
for atm in automatas[7:8]:
    atm.draw_graph('orig.svg')
    #trad_s1 = atm.get_single_stride_graph()
    trad_s2 = atm.get_single_stride_graph()
    trad_s2.draw_graph('trad_before_homo.svg')
    trad_s2.make_homogenous(use_espresso=False)
    #minimize_automata(trad_s2)
    trad_s2.fix_split_all()
    trad_s2.draw_graph('trad_before.svg')
    minimize_automata(trad_s2, combine_equal_syms_only=True)
    trad_s2.draw_graph('trad.svg')

    # same pipeline, but homogenize with espresso
    #new_s1 = atm.get_single_stride_graph()
    new_s2 = atm.get_single_stride_graph()
    new_s2.draw_graph('new_before_homo.svg')
    new_s2.make_homogenous(use_espresso=True)
    new_s2.draw_graph('new_before.svg')
    minimize_automata(new_s2, combine_equal_syms_only=True, remove_dead_states=False)
    new_s2.draw_graph('new.svg')

    print "trad has {} nodes new has {} nodes".format(trad_s2.nodes_count, new_s2.nodes_count)
    print "trad has {} edges new has {} edges".format(trad_s2.edges_count, new_s2.edges_count)

    #exit(0)
コード例 #11
0
def process_single_ds(uat):
    # Generate HDL for one benchmark at strides 0..max_target_stride with
    # BRAM-based matching; results are written under out_dir/<uat>.
    # Returns (uat, return_result); return_result stays empty here.

    return_result = {}
    result_dir = out_dir + str(uat)

    # start from a clean result directory
    shutil.rmtree(result_dir, ignore_errors=True)
    os.mkdir(result_dir)
    exempts = {(AnmalZoo.Snort, 1411)}  # known-problematic (benchmark, CC) pairs to skip

    max_target_stride = 2
    uat_count = 200  # number of automata to be processed
    automata_per_stage = 50  # pipelining parameter: automata per HDL stage

    automatas = atma.parse_anml_file(anml_path[uat])
    automatas.remove_ors()
    automatas = automatas.get_connected_components_as_automatas()

    after_match_reg = False
    actual_bram = False  # if True, actual bram will be used. Otherwise, LUT emulates bram

    automatas = automatas[:uat_count]
    uat_count = len(automatas)

    number_of_stages = math.ceil(len(automatas) / float(automata_per_stage))
    atms_per_stage = int(math.ceil(len(automatas) / float(number_of_stages)))

    for hom_between, is_Bram in [(False, True)]:

        for stride_val in range(max_target_stride + 1):

            hdl_folder_name = hd_gen.get_hdl_folder_name(
                prefix=str(uat),
                number_of_atms=len(automatas),
                stride_value=stride_val,
                before_match_reg=False,
                after_match_reg=after_match_reg,
                ste_type=1,
                use_bram=is_Bram,
                use_compression=False,
                compression_depth=-1)

            # input width doubles with every striding pass
            generator_ins = hd_gen.HDL_Gen(
                path=os.path.join(result_dir, hdl_folder_name),
                before_match_reg=False,
                after_match_reg=False,
                ste_type=1,
                total_input_len=automatas[0].max_val_dim_bits_len *
                pow(2, stride_val),
                bram_shape=(512, 36))

            for atm_idx, atm in enumerate(automatas):
                if (uat, atm_idx) in exempts:
                    continue
                print 'processing {0} stride {3} number {1} from {2}'.format(
                    uat, atm_idx, uat_count, stride_val)

                # optionally homogenize between striding passes
                for _ in range(stride_val):
                    if is_Bram is True and hom_between is True and atm.is_homogeneous is False:
                        atm.make_homogenous()
                        atm.make_parentbased_homogeneous()

                    atm = atm.get_single_stride_graph()

                if atm.is_homogeneous is False:
                    atm.make_homogenous()

                minimize_automata(atm)

                if is_Bram is True and hom_between is False:
                    atm.fix_split_all()

                if is_Bram:
                    # value 2 per stride dimension for every real node —
                    # presumably selects BRAM matching; confirm with HDL_Gen
                    lut_bram_dic = {
                        n: tuple((2 for _ in range(atm.stride_value)))
                        for n in atm.nodes if n.is_fake is False
                    }
                else:
                    lut_bram_dic = {}

                generator_ins.register_automata(atm=atm,
                                                use_compression=False,
                                                lut_bram_dic=lut_bram_dic)

                if (atm_idx + 1) % atms_per_stage == 0:
                    generator_ins.register_stage_pending(single_out=False,
                                                         use_bram=actual_bram)

            # NOTE(review): unlike the sibling generators in this project,
            # there is no final register_stage_pending() before finilize(),
            # so a partially-filled last stage may be dropped — verify.
            generator_ins.finilize()

    return uat, return_result
コード例 #12
0
#Snort, EntityResolution, ClamAV, Hamming, Dotstart, Custom, Bro217, Levenstein, Bril,
# Randomfor, Dotstar03, ExactMath,Dotstar06, Fermi, PowerEN, Protomata, Dotstart09, Ranges1, SPM, Ranges 05
#SynthBring, Synthcorering

uat = AnmalZoo.Hamming

automatas = atma.parse_anml_file(anml_path[uat])
automata_name = str(uat)

exempts = {(AnmalZoo.Snort, 1411)}

automatas.remove_ors()
automatas = automatas.get_connected_components_as_automatas()

print("Number of automata: ", len(automatas))
print(automata_name)

filed_names = ['#States', '#Edges', 'max_fan_in', 'max_fan_out', 'total_sym']

for atm_idx, atm in enumerate(automatas):
    print "Processing:", uat, " ", atm_idx

    atm1E = atm.get_single_stride_graph()
    atm2E = atm1E.get_single_stride_graph()
    atm2E.make_homogenous(plus_src=False)

    minimize_automata(atm2E)
    atm2E.fix_split_all()

    print "HI"
コード例 #13
0
# (fragment — 'atm' and 'dbw' are defined outside this snippet)
atm.draw_graph("original.svg")
# draw the automaton to an svg file


b_atm = atma.automata_network.get_bit_automaton(atm, original_bit_width=atm.max_val_dim_bits_len)
# binary (1-bit-per-symbol) automaton

print b_atm.get_summary(logo=" of bitwise automata")
# print information about the binary automaton

new_bw_atm = atma.automata_network.get_strided_automata2(atm=b_atm,
                                                          stride_value=dbw, is_scalar=True, base_value=2,
                                                          add_residual=True)
# new automaton that has dbw bits per symbol

new_bw_atm.make_homogenous()
# make the automaton homogeneous

minimize_automata(new_bw_atm)
# minimize the new automaton

print new_bw_atm.get_summary(logo=" of %d-bit automata"%(dbw,))
# print summary of the new automaton

new_bw_atm.draw_graph("new_bw_atm.svg")

print "SVG files were generated."



コード例 #14
0
# (fragment — 'uat' and 'uat_index' are defined outside this snippet)
# Build a 4-bit-per-symbol automaton for one CC and double-stride it.
uat_count = 200

automatas = atma.parse_anml_file(anml_path[uat])
automatas.remove_ors()
automatas = automatas.get_connected_components_as_automatas()

atm = automatas[uat_index]

atm.draw_graph("original.svg")

#eight_stride_1 = atm.get_single_stride_graph()
#eight_stride_1.make_homogenous()
#minimize_automata(eight_stride_1)
#eight_stride_1.draw_graph("eight_bit_strie1.svg")

# lower to a 1-bit automaton, then re-stride to 4 bits per symbol
b_atm = atma.automata_network.get_bit_automaton(
    atm, original_bit_width=atm.max_val_dim_bits_len)
fourbit_atm = atma.automata_network.get_strided_automata2(atm=b_atm,
                                                          stride_value=4,
                                                          is_scalar=True,
                                                          base_value=2,
                                                          add_residual=True)
# two striding passes: two, then four 4-bit symbols per cycle
four_bit_s1 = fourbit_atm.get_single_stride_graph()
four_bit_s2 = four_bit_s1.get_single_stride_graph()

four_bit_s2.make_homogenous()
minimize_automata(four_bit_s2)

print "four bit=", four_bit_s2.nodes_count
four_bit_s2.draw_graph("fourbit_s2.svg")
コード例 #15
0
def thread_func(ds):
    try:

        full_atm = atma.parse_anml_file(anml_path[ds])
        full_atm.remove_ors()
        atms_list = full_atm.get_connected_components_as_automatas()
        atms_list = atms_list[:atms_count]

        with open(str(ds) + '4bit.csv', 'w') as csv_file:
            csv_writer = csv.writer(csv_file,
                                    delimiter=',',
                                    quotechar='"',
                                    quoting=csv.QUOTE_MINIMAL)
            csv_writer.writerow(filed_names)
            for curr_atm in atms_list:
                b_atm = atma.automata_network.get_bit_automaton(
                    curr_atm, original_bit_width=curr_atm.max_val_dim_bits_len)
                four_b_atm = atma.automata_network.get_strided_automata2(
                    atm=b_atm,
                    stride_value=4,
                    is_scalar=True,
                    base_value=2,
                    add_residual=True)
                curr_atm = four_b_atm.get_single_stride_graph()
                curr_atm.make_homogenous()
                minimize_automata(curr_atm)
                curr_atm.fix_split_all()

                match_graph = nx.Graph()

                parent_sym_to_product_sym_dic = {}
                for ste in curr_atm.nodes:
                    if ste.is_fake:
                        continue  # fake root is not considered

                    match_graph.add_node(ste)

                    # here we find the union of parents and find the version with false posetive versions
                    comb_sym_set = PackedIntervalSet(
                        []
                    )  # create an empty symbol sets to find union of parents symbol set
                    for pred in curr_atm.get_predecessors(ste):
                        if pred.is_fake:
                            continue
                        for ivl in pred.symbols.intervals:
                            comb_sym_set.add_interval(
                                ivl
                            )  # finding the union of parents symbol sets

                    #  making symbol set smaller
                    comb_sym_set.prone()
                    comb_sym_set.merge()
                    comb_sym_set = comb_sym_set.get_combinatorial_symbol_set()
                    parent_sym_to_product_sym_dic[ste] = comb_sym_set

                    # now having a graph with no edge, we add edge between nodes if they can be combined with each other
                    # Two nodes can be combined if these two conditions are satisfied. First, they should not have a common symbol
                    # second, their parrents should also not have common symbols

                    for n in match_graph.nodes():
                        if n == ste:
                            continue  # we do not check a node with itself

                        if is_there_common_sym(ste.symbols, n.symbols):
                            continue  # first condition has not been met
                        elif is_there_common_sym(
                                parent_sym_to_product_sym_dic[ste],
                                parent_sym_to_product_sym_dic[n]):
                            continue  # second condition has not been met
                        else:  # both condiotions have been met, we can create an edge between ste, n
                            match_graph.add_edge(ste, n)

                matching_result = matching_alg.max_weight_matching(match_graph)

                print "number of orignal nodes ", curr_atm.nodes_count
                print "number of matching nodes ", 2 * len(matching_result)
                csv_writer.writerow(
                    [curr_atm.nodes_count, 2 * len(matching_result)])
    except Exception as ex:
        print traceback.print_exc()
コード例 #16
0
ファイル: temp.py プロジェクト: anonymousUser0/ASPLOS
# (fragment — ste1..ste8 and my_Automata are constructed above this snippet)
# Chain eight STEs into a linear automaton, draw it, double-stride it,
# homogenize/minimize, and draw the result for comparison.
my_Automata.add_element(ste1)
my_Automata.add_element(ste2)
my_Automata.add_element(ste3)
my_Automata.add_element(ste4)
my_Automata.add_element(ste5)
my_Automata.add_element(ste6)
my_Automata.add_element(ste7)
my_Automata.add_element(ste8)

# linear chain: ste1 -> ste2 -> ... -> ste8
my_Automata.add_edge(ste1, ste2)
my_Automata.add_edge(ste2, ste3)
my_Automata.add_edge(ste3, ste4)
my_Automata.add_edge(ste4, ste5)
my_Automata.add_edge(ste5, ste6)
my_Automata.add_edge(ste6, ste7)
my_Automata.add_edge(ste7, ste8)

my_Automata.draw_graph('t.svg')
print my_Automata.get_summary()

# two striding passes: four symbols consumed per cycle afterwards
my_Automata = my_Automata.get_single_stride_graph()
my_Automata = my_Automata.get_single_stride_graph()

my_Automata.make_homogenous()
minimize_automata(my_Automata)

my_Automata.fix_split_all()

print my_Automata.get_summary()
my_Automata.draw_graph('s.svg')
コード例 #17
0
def process_single_ds(uat):
    """Collect automaton statistics for one AnmalZoo benchmark ``uat``.

    Pipeline per automaton: bit-level automaton -> re-stride to 4-bit
    scalar symbols -> stride further per target stride value ->
    homogenize -> minimize -> (BRAM mode) symbol-set split fix.
    Per-automaton stats go to per-stride CSV files in a fresh
    per-dataset directory; averages are appended to a summary.txt shared
    across worker processes and guarded with an fcntl lock.

    Returns (uat, return_result) where return_result maps
    (is_Bram, stride) to the tuple of averaged metric lists.  Depends on
    module globals: out_dir, anml_path, AnmalZoo, atma,
    minimize_automata, csv, fcntl, shutil, os, traceback.
    """
    try:
        #uat = AnmalZoo.Ranges05

        return_result = {}
        result_dir = out_dir + str(uat)

        # start from a clean per-dataset output directory
        shutil.rmtree(result_dir, ignore_errors=True)
        os.mkdir(result_dir)
        exempts = {(AnmalZoo.Snort, 1411)}  # known-problematic (dataset, index) pairs, skipped below

        min_target_stride, max_target_stride = 3, 3  # only stride 3 is processed here
        uat_count = 200

        automatas = atma.parse_anml_file(anml_path[uat])
        automatas.remove_ors()
        automatas = automatas.get_connected_components_as_automatas()

        #uat_count = len(automatas)  # comment this to test a subset of automatons defined in uat_count

        automatas = automatas[:uat_count]
        uat_count = len(automatas)

        filed_names = [
            'number_of_states', 'number_of_edges', 'max_fan_in', 'max_fan_out',
            'max_symbol_len', 'min_symbol_len', 'total_sym'
        ]
        for hom_between, is_Bram in [(False, True)]:
            # one accumulator slot per stride value (list index == stride)
            n_states = [0.0 for _ in range(max_target_stride + 1)]
            n_edges = [0.0 for _ in range(max_target_stride + 1)]
            max_fan_in = [0.0 for _ in range(max_target_stride + 1)]
            max_fan_out = [0.0 for _ in range(max_target_stride + 1)]
            max_sym_len = [0.0 for _ in range(max_target_stride + 1)]
            min_sym_len = [0.0 for _ in range(max_target_stride + 1)]
            total_sym = [0.0 for _ in range(max_target_stride + 1)]

            # One CSV writer per stride value.  NOTE(review): the file
            # objects are never closed explicitly; closing relies on the
            # writers being garbage collected after `del csv_writers`
            # below (CPython refcounting) — confirm this is intended.
            csv_writers = []
            for i in range(max_target_stride + 1):
                f = open(
                    result_dir + '/S' + str(i) + '_' + str(uat_count) +
                    'is_HNH' + str(hom_between) + 'is_Bram' + str(is_Bram) +
                    'len' + str(uat_count) + '.csv', 'w')
                csv_writer = csv.writer(f,
                                        delimiter=',',
                                        quotechar='"',
                                        quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(filed_names)
                csv_writers.append(csv_writer)

            for atm_idx, atm in enumerate(automatas):
                # rebuild the automaton at bit granularity, then re-stride
                # it to 4-bit scalar symbols as the common starting point
                b_atm = atma.automata_network.get_bit_automaton(
                    atm, original_bit_width=atm.max_val_dim_bits_len)
                atm = atma.automata_network.get_strided_automata2(
                    atm=b_atm,
                    stride_value=4,
                    is_scalar=True,
                    base_value=2,
                    add_residual=True)

                for stride_val in reversed(
                        range(min_target_stride, max_target_stride + 1)):
                    if (uat, atm_idx) in exempts:
                        continue
                    print 'processing {0} stride {3} number {1} from {2}'.format(
                        uat, atm_idx, uat_count, stride_val)
                    s_atm = atm

                    # apply get_single_stride_graph() stride_val times,
                    # leaving the 4-bit base automaton `atm` untouched
                    for _ in range(stride_val):
                        if s_atm is atm:
                            s_atm = atm.get_single_stride_graph()
                        else:
                            s_atm = s_atm.get_single_stride_graph()

                    if s_atm.is_homogeneous is False:
                        s_atm.make_homogenous()

                    minimize_automata(s_atm)

                    # BRAM targets need per-dimension splittable symbol sets
                    if is_Bram is True and hom_between is False:
                        s_atm.fix_split_all()

                    all_nodes = filter(lambda n: n.id != 0,
                                       s_atm.nodes)  # filter fake root
                    all_nodes_symbols_len_count = [
                        len(n.symbols) for n in all_nodes
                    ]

                    # accumulate the seven per-automaton metrics
                    n_s = s_atm.nodes_count
                    n_states[stride_val] += n_s

                    n_e = s_atm.edges_count
                    n_edges[stride_val] += n_e

                    m_f_i = s_atm.max_STE_in_degree()
                    max_fan_in[stride_val] += m_f_i

                    m_f_o = s_atm.max_STE_out_degree()
                    max_fan_out[stride_val] += m_f_o

                    mx_s_l = max(all_nodes_symbols_len_count)
                    max_sym_len[stride_val] += mx_s_l

                    mn_s_l = min(all_nodes_symbols_len_count)
                    min_sym_len[stride_val] += mn_s_l

                    t_s = sum(all_nodes_symbols_len_count)
                    total_sym[stride_val] += t_s

                    csv_writers[stride_val].writerow(
                        [n_s, n_e, m_f_i, m_f_o, mx_s_l, mn_s_l, t_s])

            # drop the writers so the underlying CSV files get flushed
            # and closed before the summary below is written
            del csv_writers
            for i in range(max_target_stride + 1):
                # convert the per-stride sums into per-automaton averages
                n_states[i] /= uat_count
                n_edges[i] /= uat_count
                max_fan_in[i] /= uat_count
                max_fan_out[i] /= uat_count
                max_sym_len[i] /= uat_count
                min_sym_len[i] /= uat_count
                total_sym[i] /= uat_count

                return_result[(is_Bram, i)] = (n_states, n_edges, max_fan_in,
                                               max_fan_out, max_sym_len,
                                               min_sym_len, total_sym)

                # Append this stride's averages to the process-shared
                # summary file.  NOTE(review): LOCK_NB makes the flock
                # non-blocking, so a contended lock raises IOError here
                # rather than waiting — confirm that is intended.
                with open(out_dir + 'summary.txt', 'a+') as f:
                    fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    to_w_lns = []
                    to_w_lns.append(
                        str(uat) + "L" + str(uat_count) + "S" + str(i) +
                        "BRam" + str(is_Bram) + "\n")
                    to_w_lns.append("    average number of states = " +
                                    str(n_states[i]) + "\n")
                    to_w_lns.append("    average number of edges = " +
                                    str(n_edges[i]) + "\n")
                    to_w_lns.append("    average max fan-in = " +
                                    str(max_fan_in[i]) + "\n")
                    to_w_lns.append("    average max fan-out = " +
                                    str(max_fan_out[i]) + "\n")
                    to_w_lns.append("    average max sym-len = " +
                                    str(max_sym_len[i]) + "\n")
                    to_w_lns.append("    average min sym-len = " +
                                    str(min_sym_len[i]) + "\n")
                    to_w_lns.append("    average total_sym-len = " +
                                    str(total_sym[i]) + "\n")
                    f.writelines(to_w_lns)
                    fcntl.flock(f, fcntl.LOCK_UN)

        return uat, return_result
    except Exception, e:
        # Re-raise with the formatted traceback embedded so the parent
        # process (multiprocessing pool) sees the original failure site.
        tracebackString = traceback.format_exc(e)
        print tracebackString
        raise StandardError, "\n\nError occurred. Original traceback is\n%s\n" % (
            tracebackString)
コード例 #18
0
def process_anml(bitwidth, input_directory, automata_per_stage):
    """Translate every ANML file in *input_directory* into staged HDL.

    Each automaton is optionally double-strided to 16-bit symbols (module
    global ``dbw``), homogenized if needed, minimized (global ``minimize``),
    optionally drawn as SVG (global ``drawing``), and registered with an
    HDL generator that flushes a pipeline stage every *automata_per_stage*
    automata.  The generated HDL tree is archived as .tar.gz and the
    node/edge counts are written to a ``.stats`` file.

    :param bitwidth: bit width tag used to name the output directory
    :param input_directory: directory containing the ``*.anml`` inputs
    :param automata_per_stage: number of automata per pipeline stage

    Depends on module globals: dbw, minimize, drawing, automata_info,
    atma, hd_gen.
    """

    # This is the directory name to be created for HDL files
    output_hdl_directory = input_directory + '/' + str(bitwidth) + '_' + str(
        automata_per_stage)

    anml_input_files = glob.glob(input_directory + '/*.anml')

    # Clean up directory
    shutil.rmtree(output_hdl_directory, ignore_errors=True)
    os.mkdir(output_hdl_directory)

    # Create a directory name for the HDL code
    hdl_folder_name = hd_gen.get_hdl_folder_name(
        prefix=output_hdl_directory,
        number_of_atms=len(anml_input_files),
        stride_value=0,
        before_match_reg=False,
        after_match_reg=False,
        ste_type=1,
        use_bram=False,
        use_compression=False,
        compression_depth=-1,
        symbolic=False)

    # Create a hardware Generator
    generator_ins = hd_gen.HDL_Gen(path=hdl_folder_name,
                                   before_match_reg=False,
                                   after_match_reg=False,
                                   ste_type=1,
                                   total_input_len=dbw,
                                   symbolic=False)

    # Iterate through the ANML files in the directory
    for index, anml_input_file in enumerate(anml_input_files):

        # Grab the automata file number: the first run of digits in the
        # path.  Raw string avoids the invalid '\d' escape, which is a
        # SyntaxWarning (and eventually an error) in modern Python.
        # NOTE(review): raises AttributeError if the filename has no digits.
        automata_number = re.search(r'\d+', anml_input_file).group(0)

        # Parse the ANML file
        automata = atma.parse_anml_file(anml_input_file)

        if dbw == 16:
            print("Doing 16-bit!")
            # double-stride: two 8-bit symbols consumed per transition
            automata_with_set_bw = automata.get_single_stride_graph()
        else:
            print("Doing 8-bit!")
            automata_with_set_bw = automata

        assert dbw == automata_with_set_bw.total_bits_len, "Bitwidth assumption is incorrect!"

        if not automata_with_set_bw.is_homogeneous:
            print("Converting to homogeneous automaton")
            automata_with_set_bw.make_homogenous()

        # Minimizing the automata with NFA heuristics
        if minimize:
            minimize_automata(automata_with_set_bw)
            #atma.generate_anml_file(anml_input_file + "_min.anml", automata)
        else:
            print("No minimization of Automata")

        # Drawing automata graph
        if drawing:
            print("Drawing automata svg graph")
            automata_with_set_bw.draw_graph(anml_input_file +
                                            "_minimized_hw.svg")

        # Record "<number>,<states>,<edges>" for the .stats file below
        automata_info.append('{},{},{}\n'.format(
            automata_number, str(automata_with_set_bw.nodes_count),
            str(automata_with_set_bw.edges_count)))

        # Register this automaton
        generator_ins.register_automata(atm=automata_with_set_bw,
                                        use_compression=False)

        # We've got another batch of automata_per_stage automata to stage
        if (index + 1) % automata_per_stage == 0:
            generator_ins.register_stage_pending(single_out=False,
                                                 use_bram=False)

    # Flush any remaining automata that did not fill a complete stage
    generator_ins.register_stage_pending(single_out=False, use_bram=False)

    # Finalize and wrap up HDL in archive folder
    generator_ins.finilize()

    # Using gztar to handle LARGE automata workloads
    shutil.make_archive(hdl_folder_name, 'gztar', output_hdl_directory)
    shutil.rmtree(output_hdl_directory)

    # Write the automata node and edge count to a file
    with open(hdl_folder_name + '.stats', 'w') as output_file:
        output_file.write("Number of States, Number of Edges\n")
        output_file.write("---------------------------------\n")
        for automata_string in automata_info:
            output_file.write(automata_string)
コード例 #19
0
ファイル: compression_results.py プロジェクト: j-c-w/APSim
def process_single_ds(uat):
    """Generate staged, optionally compressed HDL for one AnmalZoo benchmark.

    Depends on module globals: anml_path, number_of_autoamtas,
    automata_per_stage, exempts, before_match_reg, after_match_reg,
    ste_type, use_bram, use_compression, compression_depth,
    get_equivalent_symbols, hd_gen, atma, math.
    """
    all_automata = atma.parse_anml_file(anml_path[uat])
    all_automata.remove_ors()
    automatas = all_automata.get_connected_components_as_automatas()

    # cap the workload at number_of_autoamtas automata
    if len(automatas) > number_of_autoamtas:
        #automatas = random.sample(automatas, number_of_autoamtas)
        automatas = automatas[:number_of_autoamtas]

    # distribute the automata evenly over the pipeline stages
    number_of_stages = math.ceil(len(automatas) / float(automata_per_stage))
    atms_per_stage = int(math.ceil(len(automatas) / float(number_of_stages)))

    for stride_val in range(1):  # only stride 0 is generated here

        hdl_apth = hd_gen.get_hdl_folder_name(
            prefix="comptest" + str(uat),
            number_of_atms=len(automatas),
            stride_value=stride_val,
            before_match_reg=before_match_reg,
            after_match_reg=after_match_reg,
            ste_type=ste_type,
            use_bram=use_bram,
            use_compression=use_compression,
            compression_depth=compression_depth)

        generator_ins = hd_gen.HDL_Gen(
            path=hdl_apth,
            before_match_reg=before_match_reg,
            after_match_reg=after_match_reg,
            ste_type=ste_type,
            total_input_len=hd_gen.HDL_Gen.get_bit_len(
                all_automata.max_val_dim) * pow(2, stride_val))

        for atm_idx, atm in enumerate(automatas):
            if (uat, atm_idx) in exempts:
                continue

            print 'processing {0} stride{3} automata {1} from {2}'.format(
                uat, atm_idx + 1, len(automatas), stride_val)

            # NOTE(review): bc_bits_len is assigned but never read below —
            # confirm whether it was meant to be passed to the generator.
            bc_bits_len = 8
            if use_compression:
                # byte-level symbol compression map for this automaton
                bc_sym_dict = get_equivalent_symbols([atm], replace=True)
                bc_bits_len = int(
                    math.ceil(math.log(max(bc_sym_dict.values()), 2)))

            translation_list = []

            # stride the automaton, collecting one translation table per
            # compressed stride level (up to compression_depth)
            for s in range(stride_val):
                atm = atm.get_single_stride_graph()
                if use_compression and s < compression_depth:
                    new_translation = get_equivalent_symbols([atm],
                                                             replace=True)
                    translation_list.append(new_translation)

            if atm.is_homogeneous is False:
                atm.make_homogenous()

            minimize_automata(atm)

            #lut_bram_dic = {n: (1, 2) for n in atm.nodes}
            generator_ins.register_automata(
                atm=atm,
                use_compression=use_compression,
                byte_trans_map=bc_sym_dict if use_compression else None)

            if use_compression:
                generator_ins.register_compressor(
                    [atm.id],
                    byte_trans_map=bc_sym_dict,
                    translation_list=translation_list)

            # flush a pipeline stage every atms_per_stage automata
            if (atm_idx + 1) % atms_per_stage == 0:
                generator_ins.register_stage_pending(use_bram=use_bram)

        # NOTE(review): a trailing partial stage is not explicitly flushed
        # before finilize() — confirm finilize() handles it.
        generator_ins.finilize()
コード例 #20
0
def process_single_ds(uat):
    """Generate per-stride BRAM-mapped HDL for one AnmalZoo benchmark.

    For each automaton: bit automaton -> 4-bit re-stride -> further
    striding per stride value (0..max_target_stride) -> homogenize ->
    minimize -> split fix -> register with the per-stride HDL writer.

    Returns (uat, return_result).  NOTE(review): return_result is
    initialized but never populated — confirm whether that is intended.
    Depends on module globals: out_dir, anml_path, AnmalZoo, atma,
    hd_gen, minimize_automata, math, os, shutil.
    """

    #uat = AnmalZoo.Ranges05

    return_result = {}
    result_dir = out_dir + str(uat)

    # start from a clean per-dataset output directory
    shutil.rmtree(result_dir, ignore_errors=True)
    os.mkdir(result_dir)
    exempts = {(AnmalZoo.Snort, 1411)}  # known-problematic (dataset, index) pairs

    max_target_stride = 2
    uat_count = 200
    automata_per_stage = 50

    automatas = atma.parse_anml_file(anml_path[uat])
    automatas.remove_ors()
    automatas = automatas.get_connected_components_as_automatas()

    after_match_reg = False
    actual_bram = False  # if True, actual bram will be used. Otherwise, LUT emulates bram


    #uat_count = len(automatas)  # comment this to test a subset of automatons defined in uat_count

    automatas = automatas[:uat_count]
    uat_count = len(automatas)

    # distribute the automata evenly over the pipeline stages
    number_of_stages = math.ceil(len(automatas) / float(automata_per_stage))
    atms_per_stage = int(math.ceil(len(automatas) / float(number_of_stages)))

    for hom_between, is_Bram in [(False, True)]:
        # one HDL generator per target stride value (index == stride)
        hdl_writers = []
        for i in range(max_target_stride + 1):
            hdl_folder_name = hd_gen.get_hdl_folder_name(prefix=str(uat), number_of_atms=len(automatas),
                                                         stride_value=i, before_match_reg=False,
                                                         after_match_reg=after_match_reg, ste_type=1, use_bram=is_Bram,
                                                         use_compression=False, compression_depth=-1)

            generator_ins = hd_gen.HDL_Gen(path=os.path.join(result_dir, hdl_folder_name), before_match_reg=False,
                                           after_match_reg=after_match_reg, ste_type=1,
                                           total_input_len=4 * pow(2, i),
                                           bram_shape=(512, 36))
            hdl_writers.append(generator_ins)


        for atm_idx, atm in enumerate(automatas):
            # rebuild at bit granularity, then re-stride to 4-bit symbols
            b_atm = atma.automata_network.get_bit_automaton(atm, original_bit_width=atm.max_val_dim_bits_len)
            atm = atma.automata_network.get_strided_automata2(atm=b_atm,
                                                              stride_value=4,
                                                              is_scalar=True,
                                                              base_value=2,
                                                              add_residual=True)

            for stride_val in reversed(range(max_target_stride + 1)):
                if (uat, atm_idx) in exempts:
                    continue
                print 'processing {0} stride {3} number {1} from {2}'.format(uat, atm_idx, uat_count, stride_val)
                s_atm = atm

                # apply get_single_stride_graph() stride_val times,
                # leaving the 4-bit base automaton `atm` untouched
                for _ in range(stride_val):
                    if s_atm is atm:
                        s_atm = atm.get_single_stride_graph()
                    else:
                        s_atm = s_atm.get_single_stride_graph()

                if s_atm.is_homogeneous is False:
                    s_atm.make_homogenous()

                minimize_automata(s_atm)

                # BRAM targets need per-dimension splittable symbol sets
                if is_Bram is True and hom_between is False:
                    s_atm.fix_split_all()

                # map every real (non-fake) node to BRAM for all dimensions
                if is_Bram:
                    lut_bram_dic = {n: tuple((2 for _ in range(s_atm.stride_value))) for n in s_atm.nodes if
                                    n.is_fake is False}
                else:
                    lut_bram_dic = {}

                hdl_writers[stride_val].register_automata(atm=s_atm, use_compression=False, lut_bram_dic=lut_bram_dic)

                # flush a pipeline stage every atms_per_stage automata
                if (atm_idx + 1) % atms_per_stage == 0:
                    hdl_writers[stride_val].register_stage_pending(single_out=False, use_bram=actual_bram)

        for i in range(max_target_stride + 1):
            hdl_writers[i].finilize()

    return uat, return_result
コード例 #21
0
ファイル: placement_with_cond.py プロジェクト: tjt7a/APSim
# Script fragment (placement_with_cond.py): converts each connected
# component of the EntityResolution benchmark into a bit automaton,
# re-strides it to 4-bit then 8-bit symbols, and homogenizes it.
draw_individually = False

# report locations of interest; presumably hardware grid positions —
# TODO(review): confirm against the rest of the original script.
report_locs_G4 = set([128 + (i * 256) + j for i in range(4) for j in range(8)])

for anml in [AnmalZoo.EntityResolution]:
    automata = atma.parse_anml_file(anml_path[anml])
    automata.remove_ors()
    #utility.minimize_automata(automata)
    # accumulative switch map
    ccs = automata.get_connected_components_as_automatas()

    residual_atms, same_incon_list, curr_node_count, residual_node_count = [], [], 0, 0

    for atm_idx, atm in enumerate(ccs):

        utility.minimize_automata(atm)
        atm.remove_all_start_nodes()

        # bit-level automaton from the original 8-bit alphabet
        atm1 = atma.automata_network.get_bit_automaton(atm,
                                                       original_bit_width=8)
        print "finished bitautomata for %d" % (atm_idx)
        # re-stride to 4-bit scalar symbols, then to 8-bit via one stride
        atm4 = atma.automata_network.get_strided_automata2(atm=atm1,
                                                           stride_value=4,
                                                           is_scalar=True,
                                                           base_value=2,
                                                           add_residual=True)
        atm8 = atm4.get_single_stride_graph()

        atm8.make_homogenous()
        print "finished homogeneous for %d" % (atm_idx)
コード例 #22
0
ファイル: stride_16_bram.py プロジェクト: tjt7a/APSim
def process_single_ds(uat):
    """Generate BRAM-based strided HDL for one AnmalZoo benchmark.

    Each automaton is strided `stride` times (module global), homogenized,
    minimized, split-fixed, and registered with a single HDL generator
    whose pipeline stages hold roughly automata_per_stage automata each.

    Depends on module globals: anml_path, AnmalZoo, stride, use_mid_fifo,
    use_rst, atma, hd_gen, minimize_automata, math, os.
    """
    exempts = {(AnmalZoo.Snort, 1411)}  # known-problematic (dataset, index) pairs

    automata_per_stage = 50
    # this is a pipelineing parameter for staging as pipeline. We usually use 50 for this parameter

    automatas = atma.parse_anml_file(anml_path[uat])
    automatas.remove_ors()
    automatas = automatas.get_connected_components_as_automatas()

    #uat_count = 10  # number of automata to be processed
    uat_count = len(
        automatas
    )  # comment this to test a subset of automatons defined in uat_count
    automatas = automatas[:uat_count]
    uat_count = len(automatas)
    number_of_stages = int(
        math.ceil(len(automatas) / float(automata_per_stage)))
    # number of pipleine stages

    # automata evenly distributed across the stages
    atms_per_stage = int(math.ceil(len(automatas) / float(number_of_stages)))

    hdl_folder_name = hd_gen.get_hdl_folder_name(prefix=str(uat),
                                                 number_of_atms=len(automatas),
                                                 stride_value=stride,
                                                 before_match_reg=False,
                                                 after_match_reg=False,
                                                 ste_type=1,
                                                 use_bram=True,
                                                 use_compression=False,
                                                 compression_depth=-1,
                                                 use_mid_fifo=use_mid_fifo,
                                                 use_rst=use_rst)

    print "folder name to store the HDLs:", hdl_folder_name

    # single generator; input width grows 2x per stride level
    generator_ins = hd_gen.HDL_Gen(
        path=os.path.join("/home/gr5yf/FCCM_2020/bram16", hdl_folder_name),
        before_match_reg=False,
        after_match_reg=False,
        ste_type=1,
        total_input_len=automatas[0].max_val_dim_bits_len * pow(2, stride),
        use_mid_fifo=use_mid_fifo,
        use_rst=use_rst,
        bram_shape=(512, 36))

    for atm_idx, atm in enumerate(automatas):
        if (uat, atm_idx) in exempts:
            continue
        print 'processing {0} number {1} from {2}'.format(
            uat, atm_idx + 1, uat_count)

        for s in range(stride):
            atm = atm.get_single_stride_graph()

        atm.make_homogenous()
        minimize_automata(atm)
        atm.fix_split_all()
        # map every real (non-fake) node to BRAM for all stride dimensions
        lut_bram_dic = {
            n: tuple((2 for _ in range(atm.stride_value)))
            for n in atm.nodes if n.is_fake is False
        }

        generator_ins.register_automata(atm=atm,
                                        use_compression=False,
                                        lut_bram_dic=lut_bram_dic)

        if (atm_idx + 1) % atms_per_stage == 0:
            generator_ins.register_stage_pending(use_bram=True)

    # flush the trailing partial stage.  NOTE(review): when the count is an
    # exact multiple of atms_per_stage this registers an empty extra stage —
    # confirm register_stage_pending tolerates that.
    generator_ins.register_stage_pending(use_bram=True)

    generator_ins.finilize(dataplane_intcon_max_degree=5,
                           contplane_intcon_max_degree=10)

    return
コード例 #23
0
# Script fragment: converts org_atm (built earlier in the original file)
# to a bit automaton, strides it 8x back to byte granularity, homogenizes,
# minimizes, verifies equivalence against the original, then exits before
# an (unreached) symbol-equivalence loop.
org_atm.draw_graph('original.svg')

bit_atm=get_bit_automaton(atm=org_atm, original_bit_width=3)
#bit_atm.draw_graph('bitwise.svg')
print bit_atm.get_summary(logo='bitwise')

# 8 bit-steps per transition restores one original symbol per transition
strided_b_atm=get_strided_automata2(atm=bit_atm, stride_value=8, is_scalar=True, base_value=2, add_residual=False)
print strided_b_atm.get_summary(logo='strided bitwise')
#strided_b_atm.draw_graph('strided.svg')

strided_b_atm.make_homogenous()
print strided_b_atm.get_summary(logo='homogeneous')
#strided_b_atm.draw_graph('homogeneous.svg', draw_edge_label=True)


minimize_automata(strided_b_atm, same_residuals_only=False)
print strided_b_atm.get_summary(logo='minimized')
strided_b_atm.draw_graph('minimized.svg')

# cross-check the transformed automaton against the original on input runs
compare_input(True, True, False, None, org_atm, strided_b_atm)
exit(0)

# NOTE(review): dead code below — unreachable after exit(0) above.
for s in automatas:

    stride_dict_list = []
    for i in range(4):

        symbol_dict, symbol_dictionary_list = get_equivalent_symbols([s], replace=True)

        if i == 0:
            initial_dic = symbol_dict
コード例 #24
0
ファイル: controversey.py プロジェクト: anonymousUser0/ASPLOS
# Script fragment (controversey.py): wires a branching automaton out of
# STEs created earlier in the original file (a main chain ste3..ste8 plus
# a parallel branch ste4 -> ste9..ste11 -> ste8), double-strides it, and
# checks that every node's symbol set is splittable.
my_Automata.add_edge(ste3, ste4)
my_Automata.add_edge(ste4, ste5)
my_Automata.add_edge(ste5, ste6)
my_Automata.add_edge(ste6, ste7)
my_Automata.add_edge(ste7, ste8)
my_Automata.add_edge(ste4, ste9)
my_Automata.add_edge(ste9, ste10)
my_Automata.add_edge(ste10, ste11)
my_Automata.add_edge(ste11, ste8)

my_Automata.draw_graph('t.svg')

# first stride with source-plus homogenization, then a second stride
my_Automata = my_Automata.get_single_stride_graph()
my_Automata.make_homogenous(plus_src=True)
my_Automata.draw_graph('t2.svg')
my_Automata = my_Automata.get_single_stride_graph()

my_Automata.make_homogenous()
minimize_automata(my_Automata, combine_equal_syms_only=True)
my_Automata.fix_split_all()

my_Automata.draw_graph('s2.svg')

print my_Automata.get_summary()

# report any node (other than the fake root) whose symbol set is still
# not splittable after fix_split_all()
for n in my_Automata.nodes:
    if n.start_type == StartType.fake_root:
        continue
    s = n.is_symbolset_splitable()
    if not s:
        print n.id
コード例 #25
0
                                                      original_bit_width=8)
# Fragment cut at both ends: the call above completes a get_bit_automaton(...)
# invocation started before this excerpt; the final four-bit block below is
# truncated mid-call in the source.
#one_bit_atm.make_homogenous()
#minimize_automata(one_bit_atm)

print "finished bit level"
#one_bit_atm.draw_graph("one_bit.svg")

# Each branch re-strides the 1-bit automaton to n-bit scalar symbols,
# homogenizes, and minimizes; the two_bit/three_bit/four_bit flags are
# defined earlier in the original script.
if two_bit:
    two_bit_atm = atma.automata_network.get_strided_automata2(
        atm=one_bit_atm,
        stride_value=2,
        is_scalar=True,
        base_value=2,
        add_residual=False)
    two_bit_atm.make_homogenous()
    minimize_automata(two_bit_atm)
    #two_bit_atm.draw_graph("two_bit.svg")

if three_bit:
    three_bit_atm = atma.automata_network.get_strided_automata2(
        atm=one_bit_atm,
        stride_value=3,
        is_scalar=True,
        base_value=2,
        add_residual=False)
    three_bit_atm.make_homogenous()
    minimize_automata(three_bit_atm)
    #three_bit_atm.draw_graph("three_bit.svg")

if four_bit:
    four_bit_atm = atma.automata_network.get_strided_automata2(
コード例 #26
0
from automata.utility.utility import minimize_automata


# Get the usage string
def usage():
    """Return the usage banner shown when the script is invoked incorrectly."""
    banner_lines = (
        "----------------- Usage ----------------",
        "./count_explicit_states.py <ANML File>",
    )
    return "\n".join(banner_lines)

# Entry point: parse one ANML file, minimize it, and print the node count
# before and after minimization as "before, after".
if __name__ == '__main__':

    # Check the correct number of command line arguments
    if len(sys.argv) != 2:
        print(usage())
        exit(-1)

    # Grab the input filename
    anml_input = sys.argv[1]

    # Parse the ANML file
    automata = atma.parse_anml_file(anml_input)
    before = automata.nodes_count

    # Minimizing the automata with NFA heuristics
    minimize_automata(automata)

    after = automata.nodes_count

    # e.g. "1024, 873"
    print(str(before) + ", " + str(after))
コード例 #27
0
def process_single_ds(uat):
    """Collect byte-level stride statistics for one AnmalZoo benchmark.

    For each (hom_between, is_Bram) configuration and each stride value
    0..max_target_stride: stride each automaton, homogenize, minimize,
    optionally fix symbol-set splits, and record per-automaton
    state/edge/fan/symbol stats to a CSV plus averaged lines appended to
    a shared summary.txt under an fcntl lock.

    Returns (uat, return_result) mapping (is_Bram, stride_val) to the
    tuple of averaged metrics.  Depends on module globals: out_dir,
    anml_path, AnmalZoo, atma, minimize_automata, csv, fcntl, shutil, os.

    NOTE(review): minimize_automata / make_homogenous mutate the shared
    automaton objects in `automatas`, so later configurations and stride
    values operate on already-transformed automata — confirm intended.
    """

    #uat = AnmalZoo.Ranges05

    return_result = {}
    result_dir = out_dir + str(uat)

    # start from a clean per-dataset output directory
    shutil.rmtree(result_dir, ignore_errors=True)
    os.mkdir(result_dir)
    exempts = {(AnmalZoo.Snort, 1411)}  # known-problematic (dataset, index) pairs

    max_target_stride = 2
    uat_count = 200

    automatas = atma.parse_anml_file(anml_path[uat])
    automatas.remove_ors()
    automatas = automatas.get_connected_components_as_automatas()


    #uat_count = len(automatas)  # comment this to test a subset of automatons defined in uat_count

    automatas = automatas[:uat_count]
    uat_count = len(automatas)



    filed_names = ['number_of_states', 'number_of_edges', 'max_fan_in', 'max_fan_out',
                   'max_symbol_len', 'min_symbol_len', 'total_sym']
    for hom_between, is_Bram in [(False, False),  (False, True)]:

        for stride_val in range(max_target_stride + 1):
            # per-configuration accumulators (summed, then averaged below)
            n_states = 0.0
            n_edges = 0.0
            max_fan_in = 0.0
            max_fan_out = 0.0
            max_sym_len = 0.0
            min_sym_len = 0.0
            total_sym = 0.0

            with open(result_dir + '/S' + str(stride_val) + '_' + str(uat_count) +
                      'is_HNH' + str(hom_between) + 'is_Bram' + str(is_Bram) + 'len' + str(len(automatas)) +'.csv', 'w') as csv_file:
                csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(filed_names)

                for atm_idx, atm in enumerate(automatas):
                    if (uat, atm_idx) in exempts:
                        continue
                    print 'processing {0} stride {3} number {1} from {2}'.format(uat, atm_idx, uat_count, stride_val)

                    # stride the automaton stride_val times; in the
                    # homogeneous-between mode, homogenize before each stride
                    for _ in range(stride_val):
                        if is_Bram is True and hom_between is True and atm.is_homogeneous is False:
                            atm.make_homogenous()
                            atm.make_parentbased_homogeneous()

                        atm = atm.get_single_stride_graph()

                    if atm.is_homogeneous is False:
                        atm.make_homogenous()

                    minimize_automata(atm)

                    # BRAM targets need per-dimension splittable symbol sets
                    if is_Bram is True and hom_between is False:
                        atm.fix_split_all()

                    all_nodes = filter(lambda n:n.id != 0, atm.nodes)  # filter fake root
                    all_nodes_symbols_len_count = [len(n.symbols) for n in all_nodes]

                    # accumulate the seven per-automaton metrics
                    n_s = atm.nodes_count
                    n_states += n_s

                    n_e = atm.edges_count
                    n_edges += n_e

                    m_f_i = atm.max_STE_in_degree()
                    max_fan_in += m_f_i

                    m_f_o = atm.max_STE_out_degree()
                    max_fan_out += m_f_o

                    mx_s_l = max(all_nodes_symbols_len_count)
                    max_sym_len += mx_s_l

                    mn_s_l = min(all_nodes_symbols_len_count)
                    min_sym_len += mn_s_l

                    t_s = sum(all_nodes_symbols_len_count)
                    total_sym += t_s

                    csv_writer.writerow([n_s, n_e, m_f_i, m_f_o, mx_s_l, mn_s_l, t_s])

            # convert sums into per-automaton averages
            n_states /= uat_count
            n_edges /= uat_count
            max_fan_in /= uat_count
            max_fan_out /= uat_count
            max_sym_len /= uat_count
            min_sym_len /= uat_count
            total_sym /= uat_count

            return_result[(is_Bram, stride_val)] = (n_states,
                                                    n_edges,
                                                    max_fan_in,
                                                    max_fan_out,
                                                    max_sym_len,
                                                    min_sym_len,
                                                    total_sym)

            # Append the averages to the process-shared summary file.
            # NOTE(review): LOCK_NB makes the flock non-blocking, so a
            # contended lock raises IOError rather than waiting.
            with open(out_dir + 'summary.txt', 'a+') as f:
                fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
                to_w_lns =[]
                to_w_lns.append(str(uat) + "L" + str(uat_count) + "S" + str(stride_val) + "BRam" + str(is_Bram) + "\n")
                to_w_lns.append("    average number of states = " + str(n_states) + "\n")
                to_w_lns.append("    average number of edges = " + str(n_edges) + "\n")
                to_w_lns.append("    average max fan-in = " + str(max_fan_in) + "\n")
                to_w_lns.append("    average max fan-out = " + str(max_fan_out) + "\n")
                to_w_lns.append("    average max sym-len = " + str(max_sym_len) + "\n")
                to_w_lns.append("    average min sym-len = " + str(min_sym_len) + "\n")
                to_w_lns.append("    average total_sym-len = " + str(total_sym) + "\n")
                f.writelines(to_w_lns)
                fcntl.flock(f, fcntl.LOCK_UN)

    return uat, return_result
コード例 #28
0
ファイル: Table3.py プロジェクト: tjt7a/APSim
def process_single_ds(uat):
    try:

        return_result = {}
        result_dir = out_dir_prefix + str(uat)

        shutil.rmtree(result_dir, ignore_errors=True)
        os.mkdir(result_dir)
        # cleaning the result folder

        automata_per_stage = 50
        # this is a pipelineing parameter for staging as pipeline. We usually use 50 for this parameter

        automatas = atma.parse_anml_file(anml_path[uat])
        automatas.remove_ors()
        automatas = automatas.get_connected_components_as_automatas()

        uat_count = 100  # number of automata to be processed
        # uat_count = len(automatas)  # comment this to test a subset of automatons defined in uat_count

        automatas = automatas[:uat_count]
        uat_count = len(automatas)

        number_of_stages = math.ceil(
            len(automatas) / float(automata_per_stage))
        # number of pipleine stages

        atms_per_stage = int(
            math.ceil(len(automatas) / float(number_of_stages)))

        hdl_folder_name = hd_gen.get_hdl_folder_name(
            prefix=str(uat),
            number_of_atms=len(automatas),
            stride_value=0,
            before_match_reg=False,
            after_match_reg=False,
            ste_type=1,
            use_bram=False,
            use_compression=False,
            compression_depth=-1)

        print "folder name to store the HDLs:", hdl_folder_name

        generator_ins = hd_gen.HDL_Gen(path=os.path.join(
            result_dir, hdl_folder_name),
                                       before_match_reg=False,
                                       after_match_reg=False,
                                       ste_type=1,
                                       total_input_len=dbw)

        for atm_idx, atm in enumerate(automatas):
            print 'processing {0} number {1} from {2}'.format(
                uat, atm_idx + 1, uat_count)
            b_atm = atma.automata_network.get_bit_automaton(
                atm, original_bit_width=atm.max_val_dim_bits_len)
            atm = atma.automata_network.get_strided_automata2(
                atm=b_atm,
                stride_value=dbw,
                is_scalar=True,
                base_value=2,
                add_residual=True)
            atm.make_homogenous()
            minimize_automata(atm)

            generator_ins.register_automata(atm=atm, use_compression=False)

            if (atm_idx + 1) % atms_per_stage == 0:
                generator_ins.register_stage_pending(use_bram=False)

        generator_ins.finilize()
        shutil.make_archive(hdl_folder_name, 'zip', result_dir)
        shutil.rmtree(result_dir)

        return uat, return_result
    except Exception as ex:
        print ex
        raise ex
Code example #29
0
    if len(automatas) > number_of_autoamtas:
        #automatas = random.sample(automatas, number_of_autoamtas)
        automatas = automatas[:number_of_autoamtas]

    for bit_stride_val in [8]:

        strided_automatas, bit_size, = [], []
        for atm_idx, atm in enumerate(automatas):
            if (uat, atm_idx) in exempts:
                continue
            old_count = atm.nodes_count

            atm = atma.automata_network.get_bit_automaton(
                atm=atm, original_bit_width=atm.max_val_dim.bit_length())
            #print "finished bitwise"
            bit_stride_atm = atma.automata_network.get_strided_automata2(
                atm=atm,
                stride_value=bit_stride_val,
                is_scalar=True,
                base_value=2)

            #print 'finished {0} stride{3} automata {1} from {2}'.format(uat, atm_idx, len(automatas), bit_stride_val)

            if bit_stride_atm.is_homogeneous is False:
                bit_stride_atm.make_homogenous(use_espresso=True)

            minimize_automata(bit_stride_atm)

            print "old bit counts {} new bit count {}".format(
                old_count, bit_stride_atm.nodes_count)