def generate_test_wrapper(self, tensor_descriptors, input_tables, output_tables):
    """ Generate the meta test_wrapper function which executes the tensor
        check loop over the tested function and reports success.

        :param tensor_descriptors: descriptors of the tensors involved in the test
        :param input_tables: tables containing the test input values
        :param output_tables: tables receiving the tested function outputs
        :return: FunctionGroup containing the single test_wrapper CodeFunction
    """
    auto_test = CodeFunction("test_wrapper", output_format=ML_Int32)
    tested_function = self.implementation.get_function_object()
    function_name = self.implementation.get_name()

    # NOTE: the original version also built an unused "report_failure"
    # FunctionObject here; it was never referenced in the scheme and has
    # been removed.
    printf_success_op = FunctionOperator(
        "printf",
        arg_map={0: "\"test successful %s\\n\"" % function_name},
        void_function=True,
        require_header=["stdio.h"])
    printf_success_function = FunctionObject("printf", [], ML_Void,
                                             printf_success_op)

    # accumulate element number
    acc_num = Variable("acc_num", precision=ML_Int64, var_type=Variable.Local)

    test_loop = self.get_tensor_test_wrapper(
        tested_function, tensor_descriptors, input_tables, output_tables,
        acc_num, self.generate_tensor_check_loop)

    # common test scheme between scalar and vector functions:
    # run the check loop, print success, return 0
    test_scheme = Statement(
        test_loop,
        printf_success_function(),
        Return(Constant(0, precision=ML_Int32)))
    auto_test.set_scheme(test_scheme)
    return FunctionGroup([auto_test])
def declare_prototype(self):
    """ Build the FunctionObject prototype describing this function:
        name, input formats, output precision, and a back-reference to
        this generator object. """
    prototype = FunctionObject(
        self.function_name,
        self.input_formats,
        self.output_precision,
        self,
    )
    return prototype
def get_printf_error_detail_fct(self, tensor_descriptor): output_format = tensor_descriptor.scalar_format # result is the second argument of the function (after erroenous element index) result_arg_id = 1 # build the format string for result/expected display result_display_format = output_format.get_display_format( ).format_string result_display_vars = output_format.get_display_format( ).pre_process_fct("{%d}" % result_arg_id) template = ("printf(\"error[%u]: {fct_name}," " result is {result_display_format} " "vs expected \"" ", {{0}}, {result_display_vars}" ")").format( fct_name=self.function_name, result_display_format=result_display_format, result_display_vars=result_display_vars, ) printf_op = TemplateOperatorFormat(template, void_function=True, arity=(1 + 1), require_header=["stdio.h"]) printf_error_detail_function = FunctionObject( "printf", [ML_UInt32] + [output_format], ML_Void, printf_op) return printf_error_detail_function
def get_printf_value(optree, error_value, expected_value, language=C_Code):
    """ generate a printf call to display the local error value alongside
        the expected value and result """
    # display formats for each of the three displayed values
    result_fmt = optree.get_precision().get_display_format(language)
    error_fmt = error_value.get_precision().get_display_format(language)
    expected_fmt = expected_value.get_precision().get_display_format(language)

    # the generated function receives 3 arguments, in order:
    # optree value ({0}), error value ({1}), expected value ({2})
    result_vars = result_fmt.pre_process_fct("{0}")
    error_vars = error_fmt.pre_process_fct("{1}")
    expected_vars = expected_fmt.pre_process_fct("{2}")

    template = (
        "printf(\"node {:35} error is {}, expected {} got {}\\n\", {}, {}, {})"
    ).format(
        str(optree.get_tag()),
        error_fmt.format_string,
        expected_fmt.format_string,
        result_fmt.format_string,
        error_vars,
        expected_vars,
        result_vars,
    )

    printf_op = TemplateOperatorFormat(template, void_function=True, arity=3)
    arg_formats = [
        optree.get_precision(),
        error_value.get_precision(),
        expected_value.get_precision(),
    ]
    printf_fct = FunctionObject("printf", arg_formats, ML_Void, printf_op)
    return printf_fct(optree, error_value, expected_value)
def get_printf_input_function(self):
    """ Build a printf FunctionObject used to report an erroneous array
        element: it displays the element index, the input value(s) and the
        faulty result value.

        :return: FunctionObject wrapping the error-reporting printf call
    """
    # single input array is assumed: only input 0's data precision is used
    input_precisions = [self.get_input_precision(0).get_data_precision()]

    # build the complete format string from the input precisions
    input_display_formats = ", ".join(
        prec.get_display_format().format_string
        for prec in input_precisions)
    # input arguments start at template slot 1 (slot 0 is the element index)
    input_display_vars = ", ".join(
        prec.get_display_format().pre_process_fct("{%d}" % index)
        for index, prec in enumerate(input_precisions, 1))

    result_arg_id = 1 + len(input_precisions)
    # build the format string for result/expected display
    result_display_format = self.precision.get_display_format(
    ).format_string
    result_display_vars = self.precision.get_display_format(
    ).pre_process_fct("{%d}" % result_arg_id)

    # {{0}} escapes to {0} so the element index remains a template slot
    template = ("printf(\"error[%u]: {fct_name}({arg_display_format}),"
                " result is {result_display_format} "
                "vs expected \""
                ", {{0}}, {arg_display_vars}, {result_display_vars}"
                ")").format(
                    fct_name=self.function_name,
                    arg_display_format=input_display_formats,
                    arg_display_vars=input_display_vars,
                    result_display_format=result_display_format,
                    result_display_vars=result_display_vars,
                )
    # arity: element index + inputs + result value
    printf_op = TemplateOperatorFormat(template, void_function=True,
                                       arity=(result_arg_id + 1),
                                       require_header=["stdio.h"])
    printf_input_function = FunctionObject(
        "printf", [ML_UInt32] + input_precisions + [self.precision],
        ML_Void, printf_op)
    return printf_input_function
def generate_scheme(self):
    """ Generate the implementation scheme of an element-wise array
        function: a main loop processing multi_elt_num element(s) per
        iteration (vectorized when multi_elt_num > 1), followed by a
        scalar epilog loop for the remaining elements.

        The element function is either a libm call (self.use_libm_function)
        or an inlined/vectorized meta-function (self.function_ctor). """
    # declaring target and instantiating optimization engine
    precision_ptr = self.get_input_precision(0)
    index_format = self.get_input_precision(2)
    multi_elt_num = self.multi_elt_num

    # function inputs: destination array, source array, element count
    dst = self.implementation.add_input_variable("dst", precision_ptr)
    src = self.implementation.add_input_variable("src", precision_ptr)
    n = self.implementation.add_input_variable("len", index_format)

    i = Variable("i", precision=index_format, var_type=Variable.Local)
    CU0 = Constant(0, precision=index_format)

    # element_format is the per-iteration working format: scalar precision,
    # or the matching vector format when processing several elements at once
    element_format = self.precision
    self.function_list = []
    if multi_elt_num > 1:
        element_format = VECTOR_TYPE_MAP[self.precision][multi_elt_num]

    elt_input = TableLoad(src, i, precision=element_format)

    local_exp = Variable("local_exp", precision=element_format,
                         var_type=Variable.Local)

    if self.use_libm_function:
        # element function delegated to a libm call (assumed ML_Binary32
        # -> ML_Binary32 signature)
        libm_fct_operator = FunctionOperator(self.use_libm_function, arity=1)
        libm_fct = FunctionObject(self.use_libm_function, [ML_Binary32],
                                  ML_Binary32, libm_fct_operator)

        if multi_elt_num > 1:
            # apply the scalar libm function lane by lane, then
            # re-assemble the lanes into a vector result
            result_list = [
                libm_fct(
                    VectorElementSelection(elt_input,
                                           Constant(elt_id,
                                                    precision=ML_Integer),
                                           precision=self.precision))
                for elt_id in range(multi_elt_num)
            ]
            result = VectorAssembling(*result_list, precision=element_format)
        else:
            result = libm_fct(elt_input)
        elt_result = ReferenceAssign(local_exp, result)
    else:
        if multi_elt_num > 1:
            scalar_result = Variable("scalar_result",
                                     precision=self.precision,
                                     var_type=Variable.Local)
            # build the scalar meta-function scheme
            fct_ctor_args = self.function_ctor.get_default_args(
                precision=self.precision,
                libm_compliant=False,
            )
            meta_function = self.function_ctor(fct_ctor_args)
            exponential_scheme = meta_function.generate_scheme()

            # instantiating required passes for typing
            pass_inst_abstract_prec = PassInstantiateAbstractPrecision(
                self.processor)
            pass_inst_prec = PassInstantiatePrecision(
                self.processor, default_precision=None)

            # executing format instantiation passes on optree
            exponential_scheme = pass_inst_abstract_prec.execute_on_optree(
                exponential_scheme)
            exponential_scheme = pass_inst_prec.execute_on_optree(
                exponential_scheme)

            vectorizer = StaticVectorizer()

            # extracting scalar argument from meta_exponential meta function
            scalar_input = meta_function.implementation.arg_list[0]

            # vectorize scalar scheme; also yields a scalar callback used
            # by the epilog loop for leftover elements
            vector_result, vec_arg_list, vector_scheme, scalar_callback, scalar_callback_fct = vectorize_function_scheme(
                vectorizer, self.get_main_code_object(), exponential_scheme,
                element_format.get_scalar_format(), [scalar_input],
                multi_elt_num)

            elt_result = inline_function(vector_scheme, vector_result,
                                         {vec_arg_list[0]: elt_input})

            local_exp = vector_result

            self.function_list.append(scalar_callback_fct)
            libm_fct = scalar_callback
        else:
            # scalar case: inline the meta-function scheme directly
            scalar_input = elt_input
            scalar_result = local_exp

            elt_result = generate_inline_fct_scheme(
                self.function_ctor, scalar_result, [scalar_input], {
                    "precision": self.precision,
                    "libm_compliant": False
                })

    CU1 = Constant(1, precision=index_format)

    local_exp_init_value = Constant(0, precision=self.precision)
    if multi_elt_num > 1:
        local_exp_init_value = Constant([0] * multi_elt_num,
                                        precision=element_format)
        # remain_n: number of trailing elements not filling a full vector
        remain_n = Modulo(n, multi_elt_num, precision=index_format)
        iter_n = n - remain_n
        CU_ELTNUM = Constant(multi_elt_num, precision=index_format)
        inc = i + CU_ELTNUM
    else:
        remain_n = None
        iter_n = n
        inc = i + CU1

    # main loop processing multi_elt_num element(s) per iteration
    main_loop = Loop(
        ReferenceAssign(i, CU0),
        i < iter_n,
        Statement(ReferenceAssign(local_exp, local_exp_init_value),
                  elt_result,
                  TableStore(local_exp, dst, i, precision=ML_Void),
                  ReferenceAssign(i, inc)),
    )

    # epilog to process remaining item (when the length is not a multiple
    # of multi_elt_num)
    if not remain_n is None:
        # TODO/FIXME: try alternative method for processing epilog
        #             by using full vector length and mask
        epilog_loop = Loop(
            Statement(),
            i < n,
            Statement(
                TableStore(libm_fct(
                    TableLoad(src, i, precision=self.precision)),
                           dst,
                           i,
                           precision=ML_Void),
                ReferenceAssign(i, i + CU1),
            ))
        main_loop = Statement(main_loop, epilog_loop)

    return main_loop
def generate_scheme(self):
    """ Generate the implementation scheme of a softmax-like array
        function: a first loop computes exp of each input element,
        storing it into dst and accumulating the sum; a second loop
        multiplies every stored element by the reciprocal of the sum. """
    # declaring target and instantiating optimization engine
    precision_ptr = self.get_input_precision(0)
    index_format = self.get_input_precision(2)

    # function inputs: destination array, source array, element count
    dst = self.implementation.add_input_variable("dst", precision_ptr)
    src = self.implementation.add_input_variable("src", precision_ptr)
    n = self.implementation.add_input_variable("len", index_format)

    i = Variable("i", precision=index_format, var_type=Variable.Local)
    CU1 = Constant(1, precision=index_format)
    CU0 = Constant(0, precision=index_format)
    inc = i + CU1

    elt_input = TableLoad(src, i, precision=self.precision)

    local_exp = Variable("local_exp", precision=self.precision,
                         var_type=Variable.Local)

    if self.use_libm_function:
        # per-element exponential delegated to libm's expf
        libm_exp_operator = FunctionOperator("expf", arity=1)
        libm_exp = FunctionObject("expf", [ML_Binary32], ML_Binary32,
                                  libm_exp_operator)
        elt_result = ReferenceAssign(local_exp, libm_exp(elt_input))
    else:
        # inline ML_Exponential meta-function scheme for each element
        exponential_args = ML_Exponential.get_default_args(
            precision=self.precision,
            libm_compliant=False,
            debug=False,
        )
        meta_exponential = ML_Exponential(exponential_args)
        exponential_scheme = meta_exponential.generate_scheme()
        elt_result = inline_function(
            exponential_scheme,
            local_exp,
            {meta_exponential.implementation.arg_list[0]: elt_input},
        )

    # running sum of the exponentials
    elt_acc = Variable("elt_acc", precision=self.precision,
                       var_type=Variable.Local)

    # first pass: dst[i] = exp(src[i]); elt_acc += exp(src[i])
    exp_loop = Loop(
        ReferenceAssign(i, CU0),
        i < n,
        Statement(ReferenceAssign(local_exp, 0),
                  elt_result,
                  TableStore(local_exp, dst, i, precision=ML_Void),
                  ReferenceAssign(elt_acc, elt_acc + local_exp),
                  ReferenceAssign(i, i + CU1)),
    )

    # reciprocal of the accumulated sum (single division, then multiplies)
    sum_rcp = Division(1, elt_acc, precision=self.precision,
                       tag="sum_rcp", debug=debug_multi)

    # second pass: dst[i] *= sum_rcp (normalization)
    div_loop = Loop(
        ReferenceAssign(i, CU0),
        i < n,
        Statement(
            TableStore(Multiplication(
                TableLoad(dst, i, precision=self.precision), sum_rcp),
                       dst, i, precision=ML_Void),
            ReferenceAssign(i, inc)),
    )

    main_scheme = Statement(ReferenceAssign(elt_acc, 0), exp_loop,
                            sum_rcp, div_loop)
    return main_scheme
def generate_bench(self, processor, test_num=1000, unroll_factor=10):
    """ generate performance bench for self.op_class

        Builds a dependency chain of unroll_factor operations, iterates it
        test_num / unroll_factor times, and reports cycles-per-element.

        :param processor: target processor (provides timestamp primitives)
        :param test_num: total number of elementary operations to time
        :param unroll_factor: number of chained operations per loop iteration
        :return: bench scheme Statement
    """
    # random initial values within the declared init interval
    initial_inputs = [
        Constant(random.uniform(inf(self.init_interval),
                                sup(self.init_interval)),
                 precision=precision)
        for i, precision in enumerate(self.input_precisions)
    ]

    # volatile-qualified inputs to prevent the compiler from folding
    # the dependency chain away
    var_inputs = [
        Variable("var_%d" % i,
                 precision=FormatAttributeWrapper(precision, ["volatile"]),
                 var_type=Variable.Local)
        for i, precision in enumerate(self.input_precisions)
    ]

    printf_timing_op = FunctionOperator(
        "printf",
        arg_map={
            0: "\"%s[%s] %%lld elts computed "\
               "in %%lld cycles =>\\n %%.3f CPE \\n\"" % (
                   self.bench_name,
                   self.output_precision.get_display_format()
               ),
            1: FO_Arg(0),
            2: FO_Arg(1),
            3: FO_Arg(2),
            4: FO_Arg(3)
        },
        void_function=True
    )
    printf_timing_function = FunctionObject(
        "printf", [self.output_precision, ML_Int64, ML_Int64, ML_Binary64],
        ML_Void, printf_timing_op)

    timer = Variable("timer", precision=ML_Int64, var_type=Variable.Local)

    # (void) cast to keep the final value live without printing it
    void_function_op = FunctionOperator("(void)", arity=1,
                                        void_function=True)
    void_function = FunctionObject("(void)", [self.output_precision],
                                   ML_Void, void_function_op)

    # initialization of operation inputs
    init_assign = metaop.Statement()
    for var_input, init_value in zip(var_inputs, initial_inputs):
        init_assign.push(ReferenceAssign(var_input, init_value))

    # test loop
    loop_i = Variable("i", precision=ML_Int64, var_type=Variable.Local)
    # BUGFIX: use floor division; "/" yields a float in Python 3 which is
    # not a valid value for an ML_Int64 constant
    test_num_cst = Constant(test_num // unroll_factor,
                            precision=ML_Int64,
                            tag="test_num")

    # Goal: build a chain of dependent operations to measure
    # elementary operation latency
    local_inputs = tuple(var_inputs)
    local_result = self.op_class(*local_inputs,
                                 precision=self.output_precision,
                                 unbreakable=True)
    for _ in range(unroll_factor - 1):
        local_inputs = tuple([local_result] + var_inputs[1:])
        local_result = self.op_class(*local_inputs,
                                     precision=self.output_precision,
                                     unbreakable=True)
    # renormalisation
    local_result = self.renorm_function(local_result)

    # variable assignation to build dependency chain
    var_assign = Statement()
    var_assign.push(ReferenceAssign(var_inputs[0], local_result))
    final_value = var_inputs[0]

    # loop increment value
    loop_increment = 1

    # FIX: loop_i is ML_Int64, initialize it with a matching precision
    test_loop = Loop(
        ReferenceAssign(loop_i, Constant(0, precision=ML_Int64)),
        loop_i < test_num_cst,
        Statement(var_assign,
                  ReferenceAssign(loop_i, loop_i + loop_increment)),
    )

    # bench scheme
    test_scheme = Statement(
        ReferenceAssign(timer, processor.get_current_timestamp()),
        init_assign,
        test_loop,
        ReferenceAssign(
            timer,
            Subtraction(processor.get_current_timestamp(),
                        timer,
                        precision=ML_Int64)),
        # prevent intermediary variable simplification
        void_function(final_value),
        printf_timing_function(
            final_value, Constant(test_num, precision=ML_Int64), timer,
            Division(Conversion(timer, precision=ML_Binary64),
                     Constant(test_num, precision=ML_Binary64),
                     precision=ML_Binary64))
    )

    return test_scheme
def generate_bench_wrapper(self, test_num=1, loop_num=100000,
                           test_ranges=None, debug=False):
    """ Generate the bench_wrapper meta-function which times loop_num
        executions of the tested array function and returns the measured
        cycles/nanoseconds per element.

        :param test_num: number of randomly-sized sub-arrays to bench
        :param loop_num: number of repetitions of the whole test loop
        :param test_ranges: intervals the input values are drawn from
                            (defaults to [Interval(-1.0, 1.0)])
        :param debug: unused here; kept for interface compatibility
        :return: FunctionGroup containing the bench_wrapper CodeFunction
    """
    # FIX: avoid a mutable default argument; build the default per call
    if test_ranges is None:
        test_ranges = [Interval(-1.0, 1.0)]

    # interval where the array length is chosen from (randomly)
    index_range = self.test_index_range

    auto_test = CodeFunction("bench_wrapper", output_format=ML_Binary64)

    tested_function = self.implementation.get_function_object()
    function_name = self.implementation.get_name()

    # NOTE: unused "report_failure" and "printf success" helper objects
    # from the original version were removed (never referenced in the
    # bench scheme)

    output_precision = FormatAttributeWrapper(self.precision, ["volatile"])

    test_total = test_num

    # number of arrays expected as inputs for tested_function
    NUM_INPUT_ARRAY = 1
    # position of the input array in tested_function operands (generally
    # equals to 1 as the 0-th input is often the destination array)
    INPUT_INDEX_OFFSET = 1

    # concatenating standard test array at the beginning of randomly
    # generated array
    TABLE_SIZE_VALUES = [
        len(std_table) for std_table in self.standard_test_cases
    ] + [
        random.randrange(index_range[0], index_range[1] + 1)
        for i in range(test_num)
    ]
    OFFSET_VALUES = [sum(TABLE_SIZE_VALUES[:i]) for i in range(test_total)]

    table_size_offset_array = generate_2d_table(
        test_total,
        2,
        ML_UInt32,
        self.uniquify_name("table_size_array"),
        value_gen=(lambda row_id:
                   (TABLE_SIZE_VALUES[row_id], OFFSET_VALUES[row_id])))

    INPUT_ARRAY_SIZE = sum(TABLE_SIZE_VALUES)

    # TODO/FIXME: implement proper input range depending on input index
    # assuming a single input array
    input_precisions = [self.get_input_precision(1).get_data_precision()]
    rng_map = [
        get_precision_rng(precision, inf(test_range), sup(test_range))
        for precision, test_range in zip(input_precisions, test_ranges)
    ]

    # generated table of inputs
    # NOTE: table_id is bound as a lambda default to avoid Python's
    # late-binding closure pitfall
    input_tables = [
        generate_1d_table(
            INPUT_ARRAY_SIZE,
            self.get_input_precision(INPUT_INDEX_OFFSET +
                                     table_id).get_data_precision(),
            self.uniquify_name("input_table_arg%d" % table_id),
            value_gen=(
                lambda _, table_id=table_id:
                input_precisions[table_id].round_sollya_object(
                    rng_map[table_id].get_new_value(), sollya.RN)))
        for table_id in range(NUM_INPUT_ARRAY)
    ]

    # generate output_array (left empty, filled by the tested function)
    output_array = generate_1d_table(
        INPUT_ARRAY_SIZE,
        output_precision,
        self.uniquify_name("output_array"),
        value_gen=(lambda _: None),
        const=False,
        empty=True)

    # accumulate element number
    acc_num = Variable("acc_num", precision=ML_Int64,
                       var_type=Variable.Local)

    def empty_post_statement_gen(input_tables, output_array,
                                 table_size_offset_array, array_offset,
                                 array_len, test_id):
        # benching does not check results, so the per-array post
        # statement is empty
        return Statement()

    test_loop = self.get_array_test_wrapper(test_total, tested_function,
                                            table_size_offset_array,
                                            input_tables, output_array,
                                            acc_num,
                                            empty_post_statement_gen)

    timer = Variable("timer", precision=ML_Int64, var_type=Variable.Local)
    printf_timing_op = FunctionOperator(
        "printf",
        arg_map={
            0: "\"%s %%\"PRIi64\" elts computed in %%\"PRIi64\" nanoseconds => %%.3f CPE \\n\"" % function_name,
            1: FO_Arg(0),
            2: FO_Arg(1),
            3: FO_Arg(2)
        },
        void_function=True)
    printf_timing_function = FunctionObject(
        "printf", [ML_Int64, ML_Int64, ML_Binary64], ML_Void,
        printf_timing_op)

    vj = Variable("j", precision=ML_Int32, var_type=Variable.Local)
    loop_num_cst = Constant(loop_num, precision=ML_Int32, tag="loop_num")
    loop_increment = 1

    # bench measure of clock per element
    cpe_measure = Division(
        Conversion(timer, precision=ML_Binary64),
        Conversion(acc_num, precision=ML_Binary64),
        precision=ML_Binary64,
        tag="cpe_measure",
    )

    # common test scheme between scalar and vector functions
    test_scheme = Statement(
        self.processor.get_init_timestamp(),
        ReferenceAssign(timer, self.processor.get_current_timestamp()),
        ReferenceAssign(acc_num, 0),
        Loop(
            ReferenceAssign(vj, Constant(0, precision=ML_Int32)),
            vj < loop_num_cst,
            Statement(test_loop,
                      ReferenceAssign(vj, vj + loop_increment))),
        ReferenceAssign(
            timer,
            Subtraction(self.processor.get_current_timestamp(),
                        timer,
                        precision=ML_Int64)),
        printf_timing_function(
            Conversion(acc_num, precision=ML_Int64),
            timer,
            cpe_measure,
        ),
        Return(cpe_measure),
    )

    auto_test.set_scheme(test_scheme)
    return FunctionGroup([auto_test])
def generate_array_check_loop(self, input_tables, output_array,
                              table_size_offset_array, array_offset,
                              array_len, test_id):
    """ Generate a loop checking the content of a sub-array of results
        against the expected values, printing a diagnostic (and returning
        1 unless self.break_error) on the first mismatch.

        :param input_tables: tables of input values
        :param output_array: table of results produced by the tested function
        :param table_size_offset_array: (size, offset) table for sub-arrays
        :param array_offset: offset of the checked sub-array
        :param array_len: length of the checked sub-array
        :param test_id: sub-array/test index (unused here)
        :return: Loop node implementing the check
    """
    # internal array iterator index
    vj = Variable("j", precision=ML_UInt32, var_type=Variable.Local)

    printf_input_function = self.get_printf_input_function()

    # NOTE: the original version also built unused printf_error_function
    # and printf_max_function objects here; they were never referenced
    # and have been removed.

    NUM_INPUT_ARRAY = len(input_tables)

    # generate the expected table for the whole multi-array
    expected_table = self.generate_expected_table(input_tables,
                                                  table_size_offset_array)

    # inputs for the (vj)-th entry of the sub-array
    local_inputs = tuple(
        TableLoad(input_tables[in_id], array_offset + vj)
        for in_id in range(NUM_INPUT_ARRAY))
    # expected values for the (vj)-th entry of the sub-array
    expected_values = [
        TableLoad(expected_table, array_offset + vj, i)
        for i in range(self.accuracy.get_num_output_value())
    ]
    # local result for the (vj)-th entry of the sub-array
    local_result = TableLoad(output_array, array_offset + vj)

    if self.break_error:
        # BUGFIX: the original referenced an undefined name
        # "output_values" here (NameError); the expected values are the
        # correct operand, mirroring the non-break branch below
        return_statement_break = Statement(
            printf_input_function(*((vj, ) + local_inputs +
                                    (local_result, ))),
            self.accuracy.get_output_print_call(self.function_name,
                                                expected_values))
    else:
        return_statement_break = Statement(
            printf_input_function(*((vj, ) + local_inputs +
                                    (local_result, ))),
            self.accuracy.get_output_print_call(self.function_name,
                                                expected_values),
            Return(Constant(1, precision=ML_Int32)))

    # loop implementation to check sub-array at array_offset
    # results validity
    check_array_loop = Loop(
        ReferenceAssign(vj, 0),
        vj < array_len,
        Statement(
            ConditionBlock(
                self.accuracy.get_output_check_test(
                    local_result, expected_values),
                return_statement_break),
            ReferenceAssign(vj, vj + 1),
        ))
    return check_array_loop
def FCT_HexaRead_gen(input_format):
    """ Build a FunctionObject wrapping the textio ``hread`` procedure
        for the given input format (reads a hexadecimal value from an
        HDL_LINE into a variable of that format). """
    hread_operator = FunctionOperator("hread", void_function=True, arity=2)
    return FunctionObject("hread",
                          [HDL_LINE, input_format],
                          ML_Void,
                          hread_operator)
def generate_datafile_testbench(self, tc_list, io_map, input_signals,
                                output_signals, time_step,
                                test_fname="test.input"):
    """ Generate testbench with input and output data externalized in
        a data file

        :param tc_list: list of (input_values, output_values) test cases
        :param io_map: entity input/output signal map (must contain "clk"
                       when self.pipelined)
        :param input_signals: dict name -> input signal node
        :param output_signals: dict name -> output signal node
        :param time_step: simulation time step
        :param test_fname: name of the generated data file
        :return: list containing the testbench CodeEntity
    """
    # textio function to read hexadecimal text
    def FCT_HexaRead_gen(input_format):
        legalized_input_format = input_format
        FCT_HexaRead = FunctionObject("hread",
                                      [HDL_LINE, legalized_input_format],
                                      ML_Void,
                                      FunctionOperator("hread",
                                                       void_function=True,
                                                       arity=2))
        return FCT_HexaRead

    # textio function to read binary text
    FCT_Read = FunctionObject("read", [HDL_LINE, ML_StdLogic], ML_Void,
                              FunctionOperator("read", void_function=True,
                                               arity=2))

    input_line = Variable("input_line", precision=HDL_LINE,
                          var_type=Variable.Local)

    # building ordered list of input and output signal names
    input_signal_list = [sname for sname in input_signals.keys()]
    input_statement = Statement()
    for input_name in input_signal_list:
        input_format = input_signals[input_name].precision
        input_var = Variable(
            "v_" + input_name,
            precision=input_format,
            var_type=Variable.Local)
        # single std_logic values are read in binary, wider formats in hexa
        if input_format is ML_StdLogic:
            input_statement.add(FCT_Read(input_line, input_var))
        else:
            input_statement.add(FCT_HexaRead_gen(input_format)(input_line, input_var))
        input_statement.add(ReferenceAssign(input_signals[input_name], input_var))

    output_signal_list = [sname for sname in output_signals.keys()]
    output_statement = Statement()
    for output_name in output_signal_list:
        output_format = output_signals[output_name].precision
        output_var = Variable(
            "v_" + output_name,
            precision=output_format,
            var_type=Variable.Local)
        if output_format is ML_StdLogic:
            output_statement.add(FCT_Read(input_line, output_var))
        else:
            output_statement.add(FCT_HexaRead_gen(output_format)(input_line, output_var))

        output_signal = output_signals[output_name]
        # compare the signal produced by the entity against the expected
        # value read from the data file
        test_pass_cond, check_statement = get_output_check_statement(output_signal, output_name, output_var)

        # message listing every input value, to contextualize a failure
        input_msg = multi_Concatenation(*tuple(sum([[" %s=" % input_tag, signal_str_conversion(input_signals[input_tag], input_signals[input_tag].precision)] for input_tag in input_signal_list], [])))

        output_statement.add(check_statement)
        assert_statement = Assert(
            test_pass_cond,
            multi_Concatenation(
                "unexpected value for inputs ",
                input_msg,
                " expecting :",
                signal_str_conversion(output_var, output_format),
                " got :",
                signal_str_conversion(output_signal, output_format),
                precision = ML_String
            ),
            severity=Assert.Failure
        )
        output_statement.add(assert_statement)

    self_component = self.implementation.get_component_object()
    self_instance = self_component(io_map = io_map, tag = "tested_entity")
    test_statement = Statement()

    DATA_FILE_NAME = test_fname

    # dump the test cases into the data file: one legend line then one
    # line of space-separated hexadecimal values per test case
    with open(DATA_FILE_NAME, "w") as data_file:
        # dumping column tags
        data_file.write("# " + " ".join(input_signal_list + output_signal_list) + "\n")

        def get_raw_cst_string(cst_format, cst_value):
            # number of hexadecimal digits needed for the format's bit size
            size = int((cst_format.get_bit_size() + 3) / 4)
            return ("{:x}").format(cst_format.get_base_format().get_integer_coding(cst_value)).zfill(size)

        for input_values, output_values in tc_list:
            # TODO; generate test data file
            cst_list = []
            for input_name in input_signal_list:
                input_value = input_values[input_name]
                input_format = input_signals[input_name].get_precision()
                cst_list.append(get_raw_cst_string(input_format, input_value))

            for output_name in output_signal_list:
                output_value = output_values[output_name]
                output_format = output_signals[output_name].get_precision()
                cst_list.append(get_raw_cst_string(output_format, output_value))
            # dumping line into file
            data_file.write(" ".join(cst_list) + "\n")

    input_stream = Variable("data_file", precision=HDL_FILE,
                            var_type=Variable.Local)
    file_status = Variable("file_status",
                           precision=HDL_OPEN_FILE_STATUS,
                           var_type=Variable.Local)
    FCT_EndFile = FunctionObject("endfile", [HDL_FILE], ML_Bool,
                                 FunctionOperator("endfile", arity=1))
    FCT_OpenFile = FunctionObject(
        "FILE_OPEN", [HDL_OPEN_FILE_STATUS, HDL_FILE, ML_String],
        ML_Void,
        FunctionOperator(
            "FILE_OPEN",
            arg_map={0: FO_Arg(0), 1: FO_Arg(1), 2: FO_Arg(2), 3: "READ_MODE"},
            void_function=True))
    FCT_ReadLine = FunctionObject(
        "readline", [HDL_FILE, HDL_LINE], ML_Void,
        FunctionOperator("readline", void_function=True, arity=2))

    reset_statement = self.get_reset_statement(io_map, time_step)
    OPEN_OK = Constant("OPEN_OK", precision=HDL_OPEN_FILE_STATUS)

    testbench = CodeEntity("testbench")
    test_process = Process(
        reset_statement,
        FCT_OpenFile(file_status, input_stream, DATA_FILE_NAME),
        ConditionBlock(
            Comparison(file_status, OPEN_OK, specifier=Comparison.NotEqual),
            Assert(
                Constant(0, precision=ML_Bool),
                " \"failed to open file {}\"".format(DATA_FILE_NAME),
                severity=Assert.Failure
            )
        ),
        # consume legend line
        FCT_ReadLine(input_stream, input_line),
        WhileLoop(
            LogicalNot(FCT_EndFile(input_stream)),
            Statement(
                FCT_ReadLine(input_stream, input_line),
                input_statement,
                # wait long enough for the pipeline to flush
                # (stage_num + 2 time steps)
                Wait(time_step * (self.stage_num + 2)),
                output_statement,
            ),
        ),
        # end of test
        Assert(
            Constant(0, precision = ML_Bool),
            " \"end of test, no error encountered \"",
            severity = Assert.Warning
        ),
        # infinite end loop
        WhileLoop(
            Constant(1, precision=ML_Bool),
            Statement(
                Wait(time_step * (self.stage_num + 2)),
            )
        )
    )

    testbench_scheme = Statement(
        self_instance,
        test_process
    )

    if self.pipelined:
        # clock must toggle twice per time_step, so it must divide evenly
        half_time_step = time_step / 2
        assert (half_time_step * 2) == time_step
        # adding clock process for pipelined bench
        clk_process = Process(
            Statement(
                ReferenceAssign(
                    io_map["clk"],
                    Constant(1, precision = ML_StdLogic)
                ),
                Wait(half_time_step),
                ReferenceAssign(
                    io_map["clk"],
                    Constant(0, precision = ML_StdLogic)
                ),
                Wait(half_time_step),
            )
        )
        testbench_scheme.push(clk_process)

    testbench.add_process(testbench_scheme)

    return [testbench]
from metalibm_core.opt.p_function_inlining import generate_inline_fct_scheme from metalibm_core.opt.opt_utils import evaluate_range from metalibm_core.code_generation.generic_processor import GenericProcessor from metalibm_core.utility.ml_template import DefaultArgTemplate, ML_NewArgTemplate from metalibm_core.utility.log_report import Log from metalibm_functions.function_map import FUNCTION_MAP LOG_VERBOSE_FUNCTION_EXPR = Log.LogLevel("FunctionExprVerbose") FUNCTION_OBJECT_MAPPING = { name: FunctionObject(name, [ML_Float] * FUNCTION_MAP[name][0].arity, ML_Float, None, range_function=FUNCTION_MAP[name][2]) for name in FUNCTION_MAP } FCT_DESC_PATTERN = r"([-+/* ().,]|\d+|{}|[xyzt])*".format("|".join( FUNCTION_OBJECT_MAPPING.keys())) def check_fct_expr(str_desc): """ check if function expression string is potentially valid """ return not re.fullmatch(FCT_DESC_PATTERN, str_desc) is None def function_parser(str_desc, var_mapping): """ parser of function expression, from str to ML_Operation graph