# Example #1 (score: 0)
        #automatas = random.sample(automatas, number_of_autoamtas)
        automatas = automatas[:number_of_autoamtas]


    number_of_stages = math.ceil(len(automatas) / float(automata_per_stage))
    atms_per_stage = int(math.ceil(len(automatas) / float(number_of_stages)))

    for bit_stride_val in [9]:

        hdl_apth = hd_gen.get_hdl_folder_name(prefix=str(uat), number_of_atms=len(automatas), stride_value=bit_stride_val,
                                              before_match_reg=before_match_reg, after_match_reg=after_match_reg,
                                              ste_type=ste_type, use_bram=use_bram, use_compression=use_compression,
                                              compression_depth=compression_depth)

        generator_ins = hd_gen.HDL_Gen(path=hdl_apth, before_match_reg=before_match_reg,
                                       after_match_reg=after_match_reg, ste_type=ste_type,
                                       total_input_len=bit_stride_val)

        strided_automatas, bit_size,  = [], []
        for atm_idx, atm in enumerate(automatas):
            if (uat, atm_idx) in exempts:
                continue

            print atm.get_summary()
            atm = atma.automata_network.get_bit_automaton(atm=atm, original_bit_width=hd_gen.HDL_Gen.get_bit_len(atm.max_val_dim))
            atm = atma.automata_network.get_strided_automata2(atm=atm, stride_value=bit_stride_val, is_scalar=True,
                                                              base_value=2, add_residual=False)

            with open('nine_bit_snort.pkl', 'wb') as f:
                pickle.dump(atm, f, pickle.HIGHEST_PROTOCOL)
# Example #2 (score: 0)
def process_single_ds(uat):
    """Generate staged (pipelined) HDL for one ANML dataset *uat*.

    The dataset is parsed, split into connected-component automata; each
    automaton is strided ``stride`` times, homogenized, minimized and
    registered with the HDL generator, flushing a pipeline stage every
    ``atms_per_stage`` automata.

    NOTE(review): relies on module-level names ``anml_path``, ``stride``,
    ``use_mid_fifo`` and ``use_rst`` — assumed to be defined by the
    enclosing script.
    """
    # Known-problematic (dataset, automaton-index) pairs to skip.
    exempts = {(AnmalZoo.Snort, 1411)}

    automata_per_stage = 50
    # this is a pipelining parameter for staging as pipeline. We usually use 50 for this parameter

    automatas = atma.parse_anml_file(anml_path[uat])
    automatas.remove_ors()
    automatas = automatas.get_connected_components_as_automatas()

    #uat_count = 10  # number of automata to be processed
    uat_count = len(
        automatas
    )  # comment this to test a subset of automatons defined in uat_count
    automatas = automatas[:uat_count]
    uat_count = len(automatas)
    number_of_stages = int(
        math.ceil(len(automatas) / float(automata_per_stage)))
    # number of pipeline stages

    # Spread the automata evenly over the computed number of stages.
    atms_per_stage = int(math.ceil(len(automatas) / float(number_of_stages)))

    hdl_folder_name = hd_gen.get_hdl_folder_name(prefix=str(uat),
                                                 number_of_atms=len(automatas),
                                                 stride_value=stride,
                                                 before_match_reg=False,
                                                 after_match_reg=False,
                                                 ste_type=1,
                                                 use_bram=True,
                                                 use_compression=False,
                                                 compression_depth=-1,
                                                 use_mid_fifo=use_mid_fifo,
                                                 use_rst=use_rst)

    print "folder name to store the HDLs:", hdl_folder_name

    # Total input width: per-dimension bit length of the first automaton,
    # doubled for every stride step (2**stride symbols per cycle).
    generator_ins = hd_gen.HDL_Gen(
        path=os.path.join("/home/gr5yf/FCCM_2020/bram16", hdl_folder_name),
        before_match_reg=False,
        after_match_reg=False,
        ste_type=1,
        total_input_len=automatas[0].max_val_dim_bits_len * pow(2, stride),
        use_mid_fifo=use_mid_fifo,
        use_rst=use_rst,
        bram_shape=(512, 36))

    for atm_idx, atm in enumerate(automatas):
        if (uat, atm_idx) in exempts:
            continue
        print 'processing {0} number {1} from {2}'.format(
            uat, atm_idx + 1, uat_count)

        # Stride the automaton `stride` times.
        for s in range(stride):
            atm = atm.get_single_stride_graph()

        atm.make_homogenous()
        minimize_automata(atm)
        atm.fix_split_all()
        # Per-node match-element selection for every real (non-fake) state;
        # presumably code 2 selects BRAM matching on each stride dimension —
        # confirm against HDL_Gen.
        lut_bram_dic = {
            n: tuple((2 for _ in range(atm.stride_value)))
            for n in atm.nodes if n.is_fake is False
        }

        generator_ins.register_automata(atm=atm,
                                        use_compression=False,
                                        lut_bram_dic=lut_bram_dic)

        # Flush a pipeline stage every atms_per_stage automata.
        if (atm_idx + 1) % atms_per_stage == 0:
            generator_ins.register_stage_pending(use_bram=True)

    # Flush the final (possibly partial) stage.
    generator_ins.register_stage_pending(use_bram=True)

    generator_ins.finilize(dataplane_intcon_max_degree=5,
                           contplane_intcon_max_degree=10)

    return
# Example #3 (score: 0)
        hdl_apth = hd_gen.get_hdl_folder_path(
            prefix="bramtest" + str(uat),
            number_of_atms=len(automatas),
            stride_value=stride_val,
            before_match_reg=before_match_reg,
            after_match_reg=after_match_reg,
            ste_type=ste_type,
            use_bram=use_bram,
            use_compression=False,
            compression_depth=-1)

        generator_ins = hd_gen.HDL_Gen(
            path=hdl_apth,
            before_match_reg=before_match_reg,
            after_match_reg=after_match_reg,
            ste_type=ste_type,
            total_input_len=automatas[0].max_val_dim_bits_len *
            pow(2, stride_val),
            bram_shape=(512, 36))

        for atm_idx, atm in enumerate(automatas):
            if (uat, atm_idx) in exempts:
                continue

            print 'processing {0} stride{3} automata {1} from {2}'.format(
                uat, atm_idx, len(automatas), stride_val)

            translation_list = []

            for s in range(stride_val):
                atm = atm.get_single_stride_graph()
# Example #4 (score: 0)
def process_single_ds(uat):
    """Generate staged (pipelined) stride-0 HDL for one ANML dataset *uat*.

    Unlike the strided variants of this routine, automata are registered
    at stride 0 (total input width ``dbw``) after minimization only.

    NOTE(review): relies on module-level names ``anml_path``, ``dbw``,
    ``use_mid_fifo`` and ``use_rst`` — assumed to be defined by the
    enclosing script.

    Returns:
        (uat, return_result) — return_result is currently always empty.
    """

    return_result = {}
    #result_dir = out_dir_prefix + str(uat)

    #shutil.rmtree(result_dir, ignore_errors=True)
    #os.mkdir(result_dir)
    # cleaning the result folder

    automata_per_stage = 100
    # this is a pipelining parameter for staging as pipeline. We usually use 50 for this parameter

    automatas = atma.parse_anml_file(anml_path[uat])
    automatas.remove_ors()
    automatas = automatas.get_connected_components_as_automatas()

    #uat_count = 10  # number of automata to be processed
    uat_count = len(
        automatas
    )  # comment this to test a subset of automatons defined in uat_count
    automatas = automatas[:uat_count]
    uat_count = len(automatas)
    number_of_stages = int(
        math.ceil(len(automatas) / float(automata_per_stage)))
    # number of pipeline stages

    # Spread the automata evenly over the computed number of stages.
    atms_per_stage = int(math.ceil(len(automatas) / float(number_of_stages)))

    hdl_folder_name = hd_gen.get_hdl_folder_name(prefix=str(uat),
                                                 number_of_atms=len(automatas),
                                                 stride_value=0,
                                                 before_match_reg=False,
                                                 after_match_reg=False,
                                                 ste_type=1,
                                                 use_bram=True,
                                                 use_compression=False,
                                                 compression_depth=-1,
                                                 use_mid_fifo=use_mid_fifo,
                                                 use_rst=use_rst)

    print "folder name to store the HDLs:", hdl_folder_name

    generator_ins = hd_gen.HDL_Gen(path=os.path.join(
        "/home/gr5yf/FCCM_2020/bram8", hdl_folder_name),
                                   before_match_reg=False,
                                   after_match_reg=False,
                                   ste_type=1,
                                   total_input_len=dbw,
                                   use_mid_fifo=use_mid_fifo,
                                   use_rst=use_rst,
                                   bram_shape=(512, 36))

    for atm_idx, atm in enumerate(automatas):
        print 'processing {0} number {1} from {2}'.format(
            uat, atm_idx + 1, uat_count)
        minimize_automata(atm)
        # Per-node match-element selection for every real (non-fake) state;
        # presumably code 2 selects BRAM matching on each stride dimension —
        # confirm against HDL_Gen.
        lut_bram_dic = {
            n: tuple((2 for _ in range(atm.stride_value)))
            for n in atm.nodes if n.is_fake is False
        }

        generator_ins.register_automata(atm=atm,
                                        use_compression=False,
                                        lut_bram_dic=lut_bram_dic)

        # Flush a pipeline stage every atms_per_stage automata.
        if (atm_idx + 1) % atms_per_stage == 0:
            generator_ins.register_stage_pending(use_bram=True)

    # Flush the final (possibly partial) stage.
    generator_ins.register_stage_pending(use_bram=True)

    generator_ins.finilize(dataplane_intcon_max_degree=5,
                           contplane_intcon_max_degree=10)

    return uat, return_result
def process_single_ds(uat):

    #uat = AnmalZoo.Ranges05

    return_result = {}
    result_dir = out_dir + str(uat)

    shutil.rmtree(result_dir, ignore_errors=True)
    os.mkdir(result_dir)
    exempts = {(AnmalZoo.Snort, 1411)}

    max_target_stride = 2
    uat_count = 200
    automata_per_stage = 50

    automatas = atma.parse_anml_file(anml_path[uat])
    automatas.remove_ors()
    automatas = automatas.get_connected_components_as_automatas()

    after_match_reg = False
    actual_bram = False  # if True, actual bram will be used. Otherwise, LUT emulates bram


    #uat_count = len(automatas)  # comment this to test a subset of automatons defined in uat_count

    automatas = automatas[:uat_count]
    uat_count = len(automatas)

    number_of_stages = math.ceil(len(automatas) / float(automata_per_stage))
    atms_per_stage = int(math.ceil(len(automatas) / float(number_of_stages)))

    for hom_between, is_Bram in [(False, True)]:
        hdl_writers = []
        for i in range(max_target_stride + 1):
            hdl_folder_name = hd_gen.get_hdl_folder_name(prefix=str(uat), number_of_atms=len(automatas),
                                                         stride_value=i, before_match_reg=False,
                                                         after_match_reg=after_match_reg, ste_type=1, use_bram=is_Bram,
                                                         use_compression=False, compression_depth=-1)

            generator_ins = hd_gen.HDL_Gen(path=os.path.join(result_dir, hdl_folder_name), before_match_reg=False,
                                           after_match_reg=after_match_reg, ste_type=1,
                                           total_input_len=4 * pow(2, i),
                                           bram_shape=(512, 36))
            hdl_writers.append(generator_ins)


        for atm_idx, atm in enumerate(automatas):
            b_atm = atma.automata_network.get_bit_automaton(atm, original_bit_width=atm.max_val_dim_bits_len)
            atm = atma.automata_network.get_strided_automata2(atm=b_atm,
                                                              stride_value=4,
                                                              is_scalar=True,
                                                              base_value=2,
                                                              add_residual=True)

            for stride_val in reversed(range(max_target_stride + 1)):
                if (uat, atm_idx) in exempts:
                    continue
                print 'processing {0} stride {3} number {1} from {2}'.format(uat, atm_idx, uat_count, stride_val)
                s_atm = atm

                for _ in range(stride_val):
                    if s_atm is atm:
                        s_atm = atm.get_single_stride_graph()
                    else:
                        s_atm = s_atm.get_single_stride_graph()

                if s_atm.is_homogeneous is False:
                    s_atm.make_homogenous()

                minimize_automata(s_atm)

                if is_Bram is True and hom_between is False:
                    s_atm.fix_split_all()

                if is_Bram:
                    lut_bram_dic = {n: tuple((2 for _ in range(s_atm.stride_value))) for n in s_atm.nodes if
                                    n.is_fake is False}
                else:
                    lut_bram_dic = {}

                hdl_writers[stride_val].register_automata(atm=s_atm, use_compression=False, lut_bram_dic=lut_bram_dic)

                if (atm_idx + 1) % atms_per_stage == 0:
                    hdl_writers[stride_val].register_stage_pending(single_out=False, use_bram=actual_bram)

        for i in range(max_target_stride + 1):
            hdl_writers[i].finilize()

    return uat, return_result
# Example #6 (score: 0)
def process_truthtable(bitwidth, input_directory, automata_per_stage):
    """Build symbolic HDL for every TruthTable (.tt) file in *input_directory*.

    Each .tt file is emitted as a Verilog truth-table module and
    represented toward the HDL generator by a one-state proxy automaton;
    stages are flushed every *automata_per_stage* automata and the
    generated HDL tree is packed into a .tar.gz archive.
    """
    # Directory that will receive the generated HDL files.
    output_hdl_directory = input_directory + '/' + str(bitwidth) + '_' + str(
        automata_per_stage)

    tt_files = glob.glob(input_directory + '/*.tt')
    print("Truth Table Files: ", tt_files)

    # Recreate the output directory from scratch.
    shutil.rmtree(output_hdl_directory, ignore_errors=True)
    os.mkdir(output_hdl_directory)

    # Folder name for the HDL code.
    hdl_folder_name = hd_gen.get_hdl_folder_name(
        prefix=output_hdl_directory,
        number_of_atms=len(tt_files),
        stride_value=0,
        before_match_reg=False,
        after_match_reg=False,
        ste_type=1,
        use_bram=False,
        use_compression=False,
        compression_depth=-1,
        symbolic=True)

    print("Folder name to store the HDLs: ", hdl_folder_name)

    # Hardware generator; symbolic-only for now (no mixing symbolic and
    # explicit automata).
    hdl_gen_ins = hd_gen.HDL_Gen(path=hdl_folder_name,
                                 before_match_reg=False,
                                 after_match_reg=False,
                                 ste_type=1,
                                 total_input_len=dbw,
                                 symbolic=True)

    # Walk the TruthTable files in the directory.
    for idx, tt_file in enumerate(tt_files):

        # Emit a Verilog truth-table module for this file.
        mod_name = 'Automata_tt_' + str(idx)
        verilog_path = hdl_folder_name + '/automata_tt_' + str(
            idx) + '.sv'

        print(tt_file, mod_name, verilog_path)

        in_ports, out_ports = VerilogTools.build_truthtable(
            tt_file, mod_name, verilog_path)

        # Proxy automaton standing in for the truth-table module.
        proxy_atm = Automatanetwork('tt_' + str(idx),
                                    True,
                                    1,
                                    255,
                                    inputs=in_ports)

        # Single reporting element so the generator sees one state.
        report_ste = S_T_E(start_type=StartType.non_start,
                           is_report=True,
                           is_marked=False,
                           id=proxy_atm.get_new_id(),
                           symbol_set=None,
                           adjacent_S_T_E_s=None,
                           report_residual=1,
                           report_code=1)

        proxy_atm.add_element(report_ste)

        # Register the proxy; for now this just builds the stages.
        hdl_gen_ins.register_automata(atm=proxy_atm, use_compression=False)

        # Another automata_per_stage automata are ready — flush a stage.
        if (idx + 1) % automata_per_stage == 0:
            hdl_gen_ins.register_stage_pending(single_out=False,
                                               use_bram=False)

    # Flush the final stage in case the file count is not a perfect
    # multiple of automata_per_stage.
    hdl_gen_ins.register_stage_pending(single_out=False, use_bram=False)

    # Finalize and wrap up HDL in the archive folder.
    hdl_gen_ins.finilize()

    # gztar copes with LARGE automata workloads.
    shutil.make_archive(hdl_folder_name, 'gztar', output_hdl_directory)
    shutil.rmtree(output_hdl_directory)
# Example #7 (score: 0)
def process_anml(bitwidth, input_directory, automata_per_stage):

    # This is the directory name to be created for HDL files
    output_hdl_directory = input_directory + '/' + str(bitwidth) + '_' + str(
        automata_per_stage)

    anml_input_files = glob.glob(input_directory + '/*.anml')

    # Clean up directory
    shutil.rmtree(output_hdl_directory, ignore_errors=True)
    os.mkdir(output_hdl_directory)

    # Create a directory name for the HDL code
    hdl_folder_name = hd_gen.get_hdl_folder_name(
        prefix=output_hdl_directory,
        number_of_atms=len(anml_input_files),
        stride_value=0,
        before_match_reg=False,
        after_match_reg=False,
        ste_type=1,
        use_bram=False,
        use_compression=False,
        compression_depth=-1,
        symbolic=False)

    # Create a hardware Generator
    generator_ins = hd_gen.HDL_Gen(path=hdl_folder_name,
                                   before_match_reg=False,
                                   after_match_reg=False,
                                   ste_type=1,
                                   total_input_len=dbw,
                                   symbolic=False)

    # Iterate through the ANML files in the directory
    for index, anml_input_file in enumerate(anml_input_files):

        # Grab the automata file number
        automata_number = re.search('\d+', anml_input_file).group(0)

        # Parse the ANML file
        automata = atma.parse_anml_file(anml_input_file)

        if dbw == 16:
            print "Doing 16-bit!"
            automata_with_set_bw = automata.get_single_stride_graph()
        else:
            print "Doing 8-bit!"
            automata_with_set_bw = automata

        assert dbw == automata_with_set_bw.total_bits_len, "Bitwidth assumption is incorrect!"

        if not automata_with_set_bw.is_homogeneous:
            print "Converting to homogeneous automaton"
            automata_with_set_bw.make_homogenous()

        # Minimizing the automata with NFA heuristics
        if minimize:
            minimize_automata(automata_with_set_bw)
            #atma.generate_anml_file(anml_input_file + "_min.anml", automata)
        else:
            print("No minimization of Automata")

        # Drawing automata graph
        if drawing:
            print "Drawing automata svg graph"
            automata_with_set_bw.draw_graph(anml_input_file +
                                            "_minimized_hw.svg")

        automata_info.append('{},{},{}\n'.format(
            automata_number, str(automata_with_set_bw.nodes_count),
            str(automata_with_set_bw.edges_count)))

        # # Register this automaton
        generator_ins.register_automata(atm=automata_with_set_bw,
                                        use_compression=False)

        # We've got another batch of automata_per_stage automata to stage
        if (index + 1) % automata_per_stage == 0:
            generator_ins.register_stage_pending(single_out=False,
                                                 use_bram=False)

    # DO we need this? maybe if our number of automata is not a perfect multiple
    # of automata_per_stage?
    generator_ins.register_stage_pending(single_out=False, use_bram=False)

    #Finalize and wrap up HDL in archive folder
    generator_ins.finilize()

    # Using gztar to handle LARGE automata workloads
    shutil.make_archive(hdl_folder_name, 'gztar', output_hdl_directory)
    shutil.rmtree(output_hdl_directory)

    # Write the automata node and edge count to a file
    with open(hdl_folder_name + '.stats', 'w') as output_file:
        output_file.write("Number of States, Number of Edges\n")
        output_file.write("---------------------------------\n")
        for automata_string in automata_info:
            output_file.write(automata_string)
def process_anml(bitwidth, input_directory, automata_per_stage):
    """Generate symbolic (truth-table based) HDL for every ANML file in
    *input_directory* and archive the result as .tar.gz.

    Each parsed automaton is optionally minimized, converted to truth
    tables and emitted as a Verilog module; a one-state proxy automaton
    stands in for the module when registering with the HDL generator.

    NOTE(review): relies on module-level names ``dbw`` and ``minimize`` —
    assumed to be defined by the enclosing script.
    """
    # This is the directory name to be created for HDL files
    output_hdl_directory = input_directory + '/' + str(bitwidth) + '_' + str(
        automata_per_stage)

    anml_input_files = glob.glob(input_directory + '/*.anml')
    print("ANML Files: ", anml_input_files)

    # Clean up directory
    shutil.rmtree(output_hdl_directory, ignore_errors=True)
    os.mkdir(output_hdl_directory)

    SYMBOLIC = True

    # Create a directory name for the HDL code
    hdl_folder_name = hd_gen.get_hdl_folder_name(
        prefix=output_hdl_directory,
        number_of_atms=len(anml_input_files),
        stride_value=0,
        before_match_reg=False,
        after_match_reg=False,
        ste_type=1,
        use_bram=False,
        use_compression=False,
        compression_depth=-1,
        symbolic=SYMBOLIC)

    print("Folder name to store the HDLs: ", hdl_folder_name)

    # Create a hardware Generator
    generator_ins = hd_gen.HDL_Gen(path=hdl_folder_name,
                                   before_match_reg=False,
                                   after_match_reg=False,
                                   ste_type=1,
                                   total_input_len=dbw,
                                   symbolic=SYMBOLIC)

    # Iterate through the ANML files in the directory
    for index, anml_input_file in enumerate(anml_input_files):

        print("Parsing Automata: {}".format(anml_input_file))

        module_name = 'Automata_tt_' + str(index)
        verilog_filename = hdl_folder_name + '/automata_tt_' + str(
            index) + '.sv'

        # Parse the ANML file
        automata = atma.parse_anml_file(anml_input_file)

        # Minimizing the automata with NFA heuristics

        if minimize:
            print("Minimizing Automata: {}".format(anml_input_file))
            minimize_automata(automata)
            #atma.generate_anml_file(anml_input_file + "_min.anml", automata)
        else:
            print("No minimization of Automata")

        # Convert the (minimized) automaton to truth tables and emit the
        # Verilog truth-table module.
        tables, start_states, accept_states = atma.generate_tt(automata)

        inputs = atma.build_truthtable(tables, start_states, accept_states,
                                       module_name, verilog_filename)

        print "Inputs: ", inputs

        # for now, we will use this automata proxy
        automata = Automatanetwork('tt_' + str(index),
                                   True,
                                   1,
                                   255,
                                   inputs=inputs)

        # Single reporting element so the generator sees one state.
        new_node = S_T_E(start_type=StartType.non_start,
                         is_report=True,
                         is_marked=False,
                         id=automata.get_new_id(),
                         symbol_set=None,
                         adjacent_S_T_E_s=None,
                         report_residual=1,
                         report_code=1)

        automata.add_element(new_node)

        print "Number of states: ", automata.nodes_count

        # Register this automaton
        generator_ins.register_automata(atm=automata, use_compression=False)

        # We've got another batch of automata_per_stage automata to stage
        if (index + 1) % automata_per_stage == 0:
            generator_ins.register_stage_pending(single_out=False,
                                                 use_bram=False)

    # Flush the final stage in case the number of automata is not a perfect
    # multiple of automata_per_stage.
    generator_ins.register_stage_pending(single_out=False, use_bram=False)

    # Finalize and wrap up HDL in archive folder
    generator_ins.finilize()

    # Using gztar to handle LARGE automata workloads
    shutil.make_archive(hdl_folder_name, 'gztar', output_hdl_directory)
    shutil.rmtree(output_hdl_directory)
# Example #9 (score: 0)
def process_single_ds(uat):
    all_automata = atma.parse_anml_file(anml_path[uat])
    all_automata.remove_ors()
    automatas = all_automata.get_connected_components_as_automatas()

    if len(automatas) > number_of_autoamtas:
        #automatas = random.sample(automatas, number_of_autoamtas)
        automatas = automatas[:number_of_autoamtas]

    number_of_stages = math.ceil(len(automatas) / float(automata_per_stage))
    atms_per_stage = int(math.ceil(len(automatas) / float(number_of_stages)))

    for stride_val in range(1):

        hdl_apth = hd_gen.get_hdl_folder_name(
            prefix="comptest" + str(uat),
            number_of_atms=len(automatas),
            stride_value=stride_val,
            before_match_reg=before_match_reg,
            after_match_reg=after_match_reg,
            ste_type=ste_type,
            use_bram=use_bram,
            use_compression=use_compression,
            compression_depth=compression_depth)

        generator_ins = hd_gen.HDL_Gen(
            path=hdl_apth,
            before_match_reg=before_match_reg,
            after_match_reg=after_match_reg,
            ste_type=ste_type,
            total_input_len=hd_gen.HDL_Gen.get_bit_len(
                all_automata.max_val_dim) * pow(2, stride_val))

        for atm_idx, atm in enumerate(automatas):
            if (uat, atm_idx) in exempts:
                continue

            print 'processing {0} stride{3} automata {1} from {2}'.format(
                uat, atm_idx + 1, len(automatas), stride_val)

            bc_bits_len = 8
            if use_compression:
                bc_sym_dict = get_equivalent_symbols([atm], replace=True)
                bc_bits_len = int(
                    math.ceil(math.log(max(bc_sym_dict.values()), 2)))

            translation_list = []

            for s in range(stride_val):
                atm = atm.get_single_stride_graph()
                if use_compression and s < compression_depth:
                    new_translation = get_equivalent_symbols([atm],
                                                             replace=True)
                    translation_list.append(new_translation)

            if atm.is_homogeneous is False:
                atm.make_homogenous()

            minimize_automata(atm)

            #lut_bram_dic = {n: (1, 2) for n in atm.nodes}
            generator_ins.register_automata(
                atm=atm,
                use_compression=use_compression,
                byte_trans_map=bc_sym_dict if use_compression else None)

            if use_compression:
                generator_ins.register_compressor(
                    [atm.id],
                    byte_trans_map=bc_sym_dict,
                    translation_list=translation_list)

            if (atm_idx + 1) % atms_per_stage == 0:
                generator_ins.register_stage_pending(use_bram=use_bram)

        generator_ins.finilize()
# Example #10 (score: 0)
        hdl_apth = hd_gen.get_hdl_folder_path(
            prefix="comptestrandom" + str(uat),
            number_of_atms=len(automatas),
            stride_value=stride_val,
            before_match_reg=before_match_reg,
            after_match_reg=after_match_reg,
            ste_type=ste_type,
            use_bram=use_bram,
            use_compression=use_compression,
            compression_depth=compression_depth)

        generator_ins = hd_gen.HDL_Gen(
            path=hdl_apth,
            before_match_reg=before_match_reg,
            after_match_reg=after_match_reg,
            ste_type=ste_type,
            total_input_len=hd_gen.HDL_Gen.get_bit_len(
                all_automata.max_val_dim) * pow(2, stride_val))

        for atm_idx, atm in enumerate(automatas):
            if (uat, atm_idx) in exempts:
                continue

            print 'processing {0} stride{3} automata {1} from {2}'.format(
                uat, atm_idx, len(automatas), stride_val)

            bc_bits_len = 8
            if use_compression:
                bc_sym_dict = get_equivalent_symbols([atm], replace=True)
                bc_bits_len = int(
def process_single_ds(uat):
    """Generate HDL for dataset *uat* at every stride in
    0..max_target_stride, one full generator run per stride value.

    NOTE(review): relies on module-level names ``out_dir``, ``anml_path``
    and ``AnmalZoo`` — assumed to be defined by the enclosing script.

    Returns:
        (uat, return_result) — return_result is currently always empty.
    """

    return_result = {}
    result_dir = out_dir + str(uat)

    # Recreate the per-dataset result directory from scratch.
    shutil.rmtree(result_dir, ignore_errors=True)
    os.mkdir(result_dir)
    # Known-problematic (dataset, automaton-index) pairs to skip.
    exempts = {(AnmalZoo.Snort, 1411)}

    max_target_stride = 2
    uat_count = 200
    automata_per_stage = 50

    automatas = atma.parse_anml_file(anml_path[uat])
    automatas.remove_ors()
    automatas = automatas.get_connected_components_as_automatas()

    after_match_reg = False
    actual_bram = False  # if True, actual bram will be used. Otherwise, LUT emulates bram

    automatas = automatas[:uat_count]
    uat_count = len(automatas)

    number_of_stages = math.ceil(len(automatas) / float(automata_per_stage))
    # Spread the automata evenly over the computed number of stages.
    atms_per_stage = int(math.ceil(len(automatas) / float(number_of_stages)))

    for hom_between, is_Bram in [(False, True)]:

        for stride_val in range(max_target_stride + 1):

            hdl_folder_name = hd_gen.get_hdl_folder_name(
                prefix=str(uat),
                number_of_atms=len(automatas),
                stride_value=stride_val,
                before_match_reg=False,
                after_match_reg=after_match_reg,
                ste_type=1,
                use_bram=is_Bram,
                use_compression=False,
                compression_depth=-1)

            # Input width doubles with every stride step.
            generator_ins = hd_gen.HDL_Gen(
                path=os.path.join(result_dir, hdl_folder_name),
                before_match_reg=False,
                after_match_reg=False,
                ste_type=1,
                total_input_len=automatas[0].max_val_dim_bits_len *
                pow(2, stride_val),
                bram_shape=(512, 36))

            for atm_idx, atm in enumerate(automatas):
                if (uat, atm_idx) in exempts:
                    continue
                print 'processing {0} stride {3} number {1} from {2}'.format(
                    uat, atm_idx, uat_count, stride_val)

                # Stride the automaton stride_val times, optionally
                # re-homogenizing between steps.
                for _ in range(stride_val):
                    if is_Bram is True and hom_between is True and atm.is_homogeneous is False:
                        atm.make_homogenous()
                        atm.make_parentbased_homogeneous()

                    atm = atm.get_single_stride_graph()

                if atm.is_homogeneous is False:
                    atm.make_homogenous()

                minimize_automata(atm)

                if is_Bram is True and hom_between is False:
                    atm.fix_split_all()

                if is_Bram:
                    # Per-node match-element selection for real (non-fake)
                    # states; presumably code 2 selects BRAM matching —
                    # confirm against HDL_Gen.
                    lut_bram_dic = {
                        n: tuple((2 for _ in range(atm.stride_value)))
                        for n in atm.nodes if n.is_fake is False
                    }
                else:
                    lut_bram_dic = {}

                generator_ins.register_automata(atm=atm,
                                                use_compression=False,
                                                lut_bram_dic=lut_bram_dic)

                # Flush a pipeline stage every atms_per_stage automata.
                if (atm_idx + 1) % atms_per_stage == 0:
                    generator_ins.register_stage_pending(single_out=False,
                                                         use_bram=actual_bram)

            generator_ins.finilize()

    return uat, return_result
# Example #12 (score: 0)
    hdl_folder_name = hd_gen.get_hdl_folder_name(prefix=input_anml_file,
                                                 number_of_atms=num_automata,
                                                 stride_value=1,
                                                 before_match_reg=False,
                                                 after_match_reg=False,
                                                 ste_type=1,
                                                 use_bram=False,
                                                 use_compression=False,
                                                 compression_depth=-1)

    #print "Folder name to store the HDLs: ", hdl_folder_name

    # Create a hardware Generator
    generator_ins = hd_gen.HDL_Gen(path=os.path.join(output_hdl_directory,
                                                     hdl_folder_name),
                                   before_match_reg=False,
                                   after_match_reg=False,
                                   ste_type=1,
                                   total_input_len=dbw)

    #print "ANML Input File: ", input_anml_file

    # Parse the ANML file
    automata = atma.parse_anml_file(input_anml_file)

    # Iterate through the ANML files in the directory
    for index in range(num_automata):

        temp_automata = automata.clone()

        # Assign each unique automaton its own unique name or the HDL generator won't work
        temp_automata.id = 'an{}'.format(index)