def generate_embedded_testbench(self, tc_list, io_map, input_signals,
                                output_signals, time_step,
                                test_fname="test.input"):
    """ Generate testbench with embedded input and output data """
    self_component = self.implementation.get_component_object()
    self_instance = self_component(io_map=io_map, tag="tested_entity")
    test_statement = Statement()

    for index, (input_values, output_values) in enumerate(tc_list):
        test_statement.add(
            self.implement_test_case(io_map, input_values, output_signals,
                                     output_values, time_step, index=index)
        )

    reset_statement = self.get_reset_statement(io_map, time_step)

    testbench = CodeEntity("testbench")
    test_process = Process(
        reset_statement,
        test_statement,
        # end of test
        Assert(
            Constant(0, precision=ML_Bool),
            " \"end of test, no error encountered \"",
            severity=Assert.Warning
        ),
        # infinite end loop
        WhileLoop(
            Constant(1, precision=ML_Bool),
            Statement(
                Wait(time_step * (self.stage_num + 2)),
            )
        )
    )

    testbench_scheme = Statement(
        self_instance,
        test_process
    )

    if self.pipelined:
        half_time_step = time_step / 2
        assert (half_time_step * 2) == time_step
        # adding clock process for pipelined bench
        clk_process = Process(
            Statement(
                ReferenceAssign(
                    io_map["clk"],
                    Constant(1, precision=ML_StdLogic)
                ),
                Wait(half_time_step),
                ReferenceAssign(
                    io_map["clk"],
                    Constant(0, precision=ML_StdLogic)
                ),
                Wait(half_time_step),
            )
        )
        testbench_scheme.push(clk_process)

    testbench.add_process(testbench_scheme)
    return [testbench]
def convert_bit_heap_to_fixed_point(current_bit_heap, signed=False):
    """ Pop every bit from current_bit_heap and pack them, layer by layer,
        into fixed-point operands; return the operand list (to be summed by
        the caller) and the Statement which assigns their individual bits """
    # final propagating sum
    op_index = 0
    op_list = []
    op_statement = Statement()
    while current_bit_heap.max_count() > 0:
        op_size = current_bit_heap.max_index - current_bit_heap.min_index + 1
        op_format = ML_StdLogicVectorFormat(op_size)
        op_reduce = Signal("op_%d" % op_index, precision=op_format,
                           var_type=Variable.Local)
        offset_index = current_bit_heap.min_index

        for index in range(current_bit_heap.min_index,
                           current_bit_heap.max_index + 1):
            out_index = index - offset_index
            bit_list = current_bit_heap.pop_bits(index, 1)
            if len(bit_list) == 0:
                # no bit left at this weight: pad the operand with a zero
                op_statement.push(
                    ReferenceAssign(BitSelection(op_reduce, out_index),
                                    Constant(0, precision=ML_StdLogic)))
            else:
                assert len(bit_list) == 1
                op_statement.push(
                    ReferenceAssign(BitSelection(op_reduce, out_index),
                                    bit_list[0]))

        op_precision = fixed_point(op_size + offset_index, -offset_index,
                                   signed=signed)
        op_list.append(
            PlaceHolder(TypeCast(op_reduce, precision=op_precision),
                        op_statement))
        op_index += 1

    return op_list, op_statement
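# Hedged usage sketch (not from the original source): assumes `heap` is a
# populated BitHeap-like object exposing the max_count/min_index/max_index/
# pop_bits interface used above, and that Metalibm's Addition node can be
# applied to the returned operands. Each PlaceHolder already carries the
# bit-assignment statement, so summing the operands reduces the heap.
def sum_bit_heap_sketch(heap, signed=False):
    op_list, _ = convert_bit_heap_to_fixed_point(heap, signed=signed)
    acc = None
    for op in op_list:
        # chain the partial operands into a single sum; the result precision
        # is left to downstream legalization passes (illustrative only)
        acc = op if acc is None else Addition(acc, op)
    return acc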
def generate_auto_test(self, test_num=10, test_range=Interval(-1.0, 1.0),
                       debug=False, time_step=10):
    """ time_step: duration of a stage (in ns) """
    # instantiating tested component
    # map of input_tag -> input_signal and output_tag -> output_signal
    io_map = {}
    # map of input_tag -> input_signal, excluding commodity signals
    # (e.g. clock and reset)
    input_signals = {}
    # map of output_tag -> output_signal
    output_signals = {}
    # excluding clock and reset signals from argument list
    # reduced_arg_list = [input_port for input_port in self.implementation.get_arg_list() if not input_port.get_tag() in ["clk", "reset"]]
    reduced_arg_list = self.implementation.get_arg_list()
    for input_port in reduced_arg_list:
        input_tag = input_port.get_tag()
        input_signal = Signal(input_tag + "_i",
                              precision=input_port.get_precision(),
                              var_type=Signal.Local)
        io_map[input_tag] = input_signal
        if input_tag not in ["clk", "reset"]:
            input_signals[input_tag] = input_signal
    for output_port in self.implementation.get_output_port():
        output_tag = output_port.get_tag()
        output_signal = Signal(output_tag + "_o",
                               precision=output_port.get_precision(),
                               var_type=Signal.Local)
        io_map[output_tag] = output_signal
        output_signals[output_tag] = output_signal

    # building list of test cases
    tc_list = []

    self_component = self.implementation.get_component_object()
    self_instance = self_component(io_map=io_map, tag="tested_entity")
    test_statement = Statement()

    # initializing random test case generator
    self.init_test_generator()

    # Appending standard test cases if required
    if self.auto_test_std:
        tc_list += self.standard_test_cases

    for i in range(test_num):
        input_values = self.generate_test_case(input_signals, io_map, i,
                                               test_range)
        tc_list.append((input_values, None))

    def compute_results(tc):
        """ update test case with output values if required """
        input_values, output_values = tc
        if output_values is None:
            return input_values, self.numeric_emulate(input_values)
        else:
            return tc

    # filling output values
    tc_list = [compute_results(tc) for tc in tc_list]

    for input_values, output_values in tc_list:
        test_statement.add(
            self.implement_test_case(io_map, input_values, output_signals,
                                     output_values, time_step))

    testbench = CodeEntity("testbench")
    test_process = Process(
        test_statement,
        # end of test
        Assert(Constant(0, precision=ML_Bool),
               " \"end of test, no error encountered \"",
               severity=Assert.Failure))

    testbench_scheme = Statement(self_instance, test_process)

    if self.pipelined:
        half_time_step = time_step / 2
        assert (half_time_step * 2) == time_step
        # adding clock process for pipelined bench
        clk_process = Process(
            Statement(
                ReferenceAssign(io_map["clk"],
                                Constant(1, precision=ML_StdLogic)),
                Wait(half_time_step),
                ReferenceAssign(io_map["clk"],
                                Constant(0, precision=ML_StdLogic)),
                Wait(half_time_step),
            ))
        testbench_scheme.push(clk_process)

    testbench.add_process(testbench_scheme)
    return [testbench]
def generate_pipeline_stage(entity, reset=False, recirculate=False,
                            one_process_per_stage=True):
    """ Process an entity to generate the required pipeline stages """
    retiming_map = {}
    retime_map = RetimeMap()
    output_assign_list = entity.implementation.get_output_assign()
    for output in output_assign_list:
        Log.report(Log.Verbose, "generating pipeline from output {} ", output)
        retime_op(output, retime_map)
    for recirculate_stage in entity.recirculate_signal_map:
        recirculate_ctrl = entity.recirculate_signal_map[recirculate_stage]
        Log.report(Log.Verbose,
                   "generating pipeline from recirculation control signal {}",
                   recirculate_ctrl)
        retime_op(recirculate_ctrl, retime_map)

    process_statement = Statement()

    # adding stage forward process
    clk = entity.get_clk_input()
    clock_statement = Statement()
    # handle towards the first clock Process (in generation order)
    # which must be the one whose pre_statement is filled with
    # signals required to be generated outside the processes
    first_process = False
    for stage_id in sorted(retime_map.stage_forward.keys()):
        stage_statement = Statement(*tuple(
            assign for assign in retime_map.stage_forward[stage_id]))

        if reset:
            reset_statement = Statement()
            for assign in retime_map.stage_forward[stage_id]:
                target = assign.get_input(0)
                reset_value = Constant(0, precision=target.get_precision())
                reset_statement.push(ReferenceAssign(target, reset_value))

            if recirculate:
                # inserting recirculation condition
                recirculate_signal = entity.get_recirculate_signal(stage_id)
                stage_statement = ConditionBlock(
                    Comparison(
                        recirculate_signal,
                        Constant(0, precision=recirculate_signal.get_precision()),
                        specifier=Comparison.Equal,
                        precision=ML_Bool),
                    stage_statement)

            stage_statement = ConditionBlock(
                Comparison(entity.reset_signal,
                           Constant(1, precision=ML_StdLogic),
                           specifier=Comparison.Equal,
                           precision=ML_Bool),
                reset_statement,
                stage_statement)

        # To meet simulation / synthesis tools, we build
        # a single if clock predicate block per stage
        clock_block = ConditionBlock(
            LogicalAnd(Event(clk, precision=ML_Bool),
                       Comparison(clk,
                                  Constant(1, precision=ML_StdLogic),
                                  specifier=Comparison.Equal,
                                  precision=ML_Bool),
                       precision=ML_Bool),
            stage_statement)

        if one_process_per_stage:
            clock_process = Process(clock_block, sensibility_list=[clk])
            entity.implementation.add_process(clock_process)
            first_process = first_process or clock_process
        else:
            clock_statement.add(clock_block)

    if one_process_per_stage:
        pass
    else:
        process_statement.add(clock_statement)
        pipeline_process = Process(process_statement, sensibility_list=[clk])
        entity.implementation.add_process(pipeline_process)
        first_process = pipeline_process

    # statement that gathers signals which must be pre-computed
    for op in retime_map.pre_statement:
        first_process.add_to_pre_statement(op)

    stage_num = len(retime_map.stage_forward.keys())
    #print "there are %d pipeline stages" % (stage_num)
    return stage_num
def generate_bench(self, processor, test_num=1000, unroll_factor=10):
    """ generate performance bench for self.op_class """
    initial_inputs = [
        Constant(random.uniform(inf(self.init_interval),
                                sup(self.init_interval)),
                 precision=precision)
        for i, precision in enumerate(self.input_precisions)
    ]

    var_inputs = [
        Variable("var_%d" % i,
                 precision=FormatAttributeWrapper(precision, ["volatile"]),
                 var_type=Variable.Local)
        for i, precision in enumerate(self.input_precisions)
    ]

    printf_timing_op = FunctionOperator(
        "printf",
        arg_map={
            0: "\"%s[%s] %%lld elts computed "
               "in %%lld cycles =>\\n %%.3f CPE \\n\"" % (
                   self.bench_name,
                   self.output_precision.get_display_format()),
            1: FO_Arg(0),
            2: FO_Arg(1),
            3: FO_Arg(2),
            4: FO_Arg(3)
        },
        void_function=True
    )
    printf_timing_function = FunctionObject(
        "printf",
        [self.output_precision, ML_Int64, ML_Int64, ML_Binary64],
        ML_Void,
        printf_timing_op)

    timer = Variable("timer", precision=ML_Int64, var_type=Variable.Local)
    void_function_op = FunctionOperator("(void)", arity=1, void_function=True)
    void_function = FunctionObject("(void)", [self.output_precision], ML_Void,
                                   void_function_op)

    # initialization of operation inputs
    init_assign = metaop.Statement()
    for var_input, init_value in zip(var_inputs, initial_inputs):
        init_assign.push(ReferenceAssign(var_input, init_value))

    # test loop
    loop_i = Variable("i", precision=ML_Int64, var_type=Variable.Local)
    # integer count of unrolled iterations
    test_num_cst = Constant(test_num // unroll_factor, precision=ML_Int64,
                            tag="test_num")

    # Goal: build a chain of dependent operations to measure
    # elementary operation latency
    local_inputs = tuple(var_inputs)
    local_result = self.op_class(*local_inputs,
                                 precision=self.output_precision,
                                 unbreakable=True)
    for i in range(unroll_factor - 1):
        local_inputs = tuple([local_result] + var_inputs[1:])
        local_result = self.op_class(*local_inputs,
                                     precision=self.output_precision,
                                     unbreakable=True)
    # renormalisation
    local_result = self.renorm_function(local_result)

    # variable assignment to build the dependency chain
    var_assign = Statement()
    var_assign.push(ReferenceAssign(var_inputs[0], local_result))
    final_value = var_inputs[0]

    # loop increment value
    loop_increment = 1

    test_loop = Loop(
        ReferenceAssign(loop_i, Constant(0, precision=ML_Int32)),
        loop_i < test_num_cst,
        Statement(var_assign,
                  ReferenceAssign(loop_i, loop_i + loop_increment)),
    )

    # bench scheme
    test_scheme = Statement(
        ReferenceAssign(timer, processor.get_current_timestamp()),
        init_assign,
        test_loop,
        ReferenceAssign(
            timer,
            Subtraction(processor.get_current_timestamp(), timer,
                        precision=ML_Int64)),
        # prevent intermediary variable simplification
        void_function(final_value),
        printf_timing_function(
            final_value,
            Constant(test_num, precision=ML_Int64),
            timer,
            Division(Conversion(timer, precision=ML_Binary64),
                     Constant(test_num, precision=ML_Binary64),
                     precision=ML_Binary64))
        # ,Return(Constant(0, precision=ML_Int32))
    )

    return test_scheme
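# Hedged illustration (not generated code): the bench above measures latency,
# not throughput, because every unrolled call consumes the previous result.
# A plain-Python analogue of that serial dependency chain, with hypothetical
# arguments `op`, `x0` and `other_inputs`:
def chained_latency_model(op, x0, other_inputs, test_num, unroll_factor):
    acc = x0
    for _ in range(test_num // unroll_factor):
        for _ in range(unroll_factor):
            # each call depends on the previous one, so calls cannot overlap;
            # total time / test_num approximates the per-operation latency (CPE)
            acc = op(acc, *other_inputs)
    return acc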
def generate_pipeline_stage(entity, reset=False, recirculate=False,
                            one_process_per_stage=True,
                            synchronous_reset=True, negate_reset=False):
    """ Process an entity to generate the pipeline stages required to
        implement the pipeline structure described by the nodes' stage
        attributes.

        :param entity: input entity to pipeline
        :type entity: ML_EntityBasis
        :param reset: indicate if a reset must be generated for pipeline registers
        :type reset: bool
        :param recirculate: trigger the integration of a recirculation signal
                            into the stage flopping condition
        :type recirculate: bool
        :param one_process_per_stage: forces the generation of a separate process
                                      for each pipeline stage (else a unique
                                      process is generated for all the stages)
        :type one_process_per_stage: bool
        :param synchronous_reset: triggers the generation of a clocked reset
        :type synchronous_reset: bool
        :param negate_reset: if set, indicates the reset is triggered when the
                             reset signal is 0 (else 1)
        :type negate_reset: bool
    """
    retiming_map = {}
    retime_map = RetimeMap()
    output_assign_list = entity.implementation.get_output_assign()
    for output in output_assign_list:
        Log.report(Log.Verbose, "generating pipeline from output {} ", output)
        retime_op(output, retime_map)
    for recirculate_stage in entity.recirculate_signal_map:
        recirculate_ctrl = entity.recirculate_signal_map[recirculate_stage]
        Log.report(Log.Verbose,
                   "generating pipeline from recirculation control signal {}",
                   recirculate_ctrl)
        retime_op(recirculate_ctrl, retime_map)

    process_statement = Statement()

    # adding stage forward process
    clk = entity.get_clk_input()
    clock_statement = Statement()
    global_reset_statement = Statement()

    Log.report(Log.Info, "design has {} flip-flop(s).", retime_map.register_count)

    # handle towards the first clock Process (in generation order)
    # which must be the one whose pre_statement is filled with
    # signals required to be generated outside the processes
    first_process = False
    for stage_id in sorted(retime_map.stage_forward.keys()):
        stage_statement = Statement(
            *tuple(assign for assign in retime_map.stage_forward[stage_id]))

        if reset:
            reset_statement = Statement()
            for assign in retime_map.stage_forward[stage_id]:
                target = assign.get_input(0)
                reset_value = Constant(0, precision=target.get_precision())
                reset_statement.push(ReferenceAssign(target, reset_value))

            if recirculate:
                # inserting recirculation condition
                recirculate_signal = entity.get_recirculate_signal(stage_id)
                stage_statement = ConditionBlock(
                    Comparison(
                        recirculate_signal,
                        Constant(0, precision=recirculate_signal.get_precision()),
                        specifier=Comparison.Equal,
                        precision=ML_Bool
                    ),
                    stage_statement
                )

            if synchronous_reset:
                # build a compound statement with reset and flops statement
                stage_statement = ConditionBlock(
                    Comparison(
                        entity.reset_signal,
                        Constant(0 if negate_reset else 1, precision=ML_StdLogic),
                        specifier=Comparison.Equal,
                        precision=ML_Bool
                    ),
                    reset_statement,
                    stage_statement
                )
            else:
                # for asynchronous reset, the reset is in a non-clocked statement
                # and will be added at the end of the stage to the same process
                # as the register clocking
                global_reset_statement.add(reset_statement)

        # To meet simulation / synthesis tools, we build
        # a single if clock predicate block per stage
        clock_block = ConditionBlock(
            LogicalAnd(
                Event(clk, precision=ML_Bool),
                Comparison(
                    clk,
                    Constant(1, precision=ML_StdLogic),
                    specifier=Comparison.Equal,
                    precision=ML_Bool
                ),
                precision=ML_Bool
            ),
            stage_statement
        )

        if one_process_per_stage:
            if reset and not synchronous_reset:
                clock_block = ConditionBlock(
                    Comparison(
                        entity.reset_signal,
                        Constant(0 if negate_reset else 1, precision=ML_StdLogic),
                        specifier=Comparison.Equal,
                        precision=ML_Bool
                    ),
                    reset_statement,
                    clock_block
                )
                clock_process = Process(
                    clock_block,
                    sensibility_list=[clk, entity.reset_signal])
            else:
                # no reset, or synchronous reset (already appended to clock_block)
                clock_process = Process(clock_block, sensibility_list=[clk])
            entity.implementation.add_process(clock_process)
            first_process = first_process or clock_process
        else:
            clock_statement.add(clock_block)

    if one_process_per_stage:
        # reset and clock processes were generated at each stage loop iteration
        pass
    else:
        process_statement.add(clock_statement)
        if synchronous_reset:
            pipeline_process = Process(process_statement, sensibility_list=[clk])
        else:
            process_statement.add(global_reset_statement)
            pipeline_process = Process(
                process_statement,
                sensibility_list=[clk, entity.reset_signal])
        entity.implementation.add_process(pipeline_process)
        first_process = pipeline_process

    # statement that gathers signals which must be pre-computed
    for op in retime_map.pre_statement:
        first_process.add_to_pre_statement(op)

    stage_num = len(retime_map.stage_forward.keys())
    Log.report(Log.Info, "there are {} pipeline stage(s)", stage_num)
    return stage_num
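# Hedged usage sketch (hypothetical `my_entity` object, not from the original
# source): once the datapath has been built and nodes carry their stage
# attributes, the entity would typically be processed once to insert the
# pipeline registers. Parameter names match the signature above.
#
#   stage_count = generate_pipeline_stage(
#       my_entity,
#       reset=True,                 # generate a reset for pipeline registers
#       recirculate=False,          # no stall / recirculation control
#       one_process_per_stage=True, # one clocked process per stage
#       synchronous_reset=True,     # reset sampled on the clock edge
#       negate_reset=False)         # reset active high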
def generate_auto_test(self, test_num=10, test_range=Interval(-1.0, 1.0),
                       debug=False, time_step=10):
    """ time_step: duration of a stage (in ns) """
    # instantiating tested component
    # map of input_tag -> input_signal and output_tag -> output_signal
    io_map = {}
    # map of input_tag -> input_signal, excluding commodity signals
    # (e.g. clock and reset)
    input_signals = {}
    # map of output_tag -> output_signal
    output_signals = {}
    # excluding clock and reset signals from argument list
    # reduced_arg_list = [input_port for input_port in self.implementation.get_arg_list() if not input_port.get_tag() in ["clk", "reset"]]
    reduced_arg_list = self.implementation.get_arg_list()
    for input_port in reduced_arg_list:
        input_tag = input_port.get_tag()
        input_signal = Signal(input_tag + "_i",
                              precision=input_port.get_precision(),
                              var_type=Signal.Local)
        io_map[input_tag] = input_signal
        if input_tag not in ["clk", "reset"]:
            input_signals[input_tag] = input_signal
    for output_port in self.implementation.get_output_port():
        output_tag = output_port.get_tag()
        output_signal = Signal(output_tag + "_o",
                               precision=output_port.get_precision(),
                               var_type=Signal.Local)
        io_map[output_tag] = output_signal
        output_signals[output_tag] = output_signal

    # building list of test cases
    tc_list = []

    self_component = self.implementation.get_component_object()
    self_instance = self_component(io_map=io_map, tag="tested_entity")
    test_statement = Statement()

    # initializing random test case generator
    self.init_test_generator()

    # Appending standard test cases if required
    if self.auto_test_std:
        tc_list += self.standard_test_cases

    for i in range(test_num):
        input_values = self.generate_test_case(input_signals, io_map, i,
                                               test_range)
        tc_list.append((input_values, None))

    def compute_results(tc):
        """ update test case with output values if required """
        input_values, output_values = tc
        if output_values is None:
            return input_values, self.numeric_emulate(input_values)
        else:
            return tc

    # filling output values
    tc_list = [compute_results(tc) for tc in tc_list]

    for input_values, output_values in tc_list:
        input_msg = ""
        # Adding input setting
        for input_tag in input_values:
            input_signal = io_map[input_tag]
            # FIXME: correct value generation depending on signal precision
            input_value = input_values[input_tag]
            test_statement.add(
                ReferenceAssign(
                    input_signal,
                    Constant(input_value,
                             precision=input_signal.get_precision())))
            value_msg = input_signal.get_precision().get_cst(
                input_value, language=VHDL_Code).replace('"', "'")
            value_msg += " / " + hex(
                input_signal.get_precision().get_base_format(
                ).get_integer_coding(input_value))
            input_msg += " {}={} ".format(input_tag, value_msg)
        test_statement.add(Wait(time_step * self.stage_num))
        # Adding output value comparison
        for output_tag in output_signals:
            output_signal = output_signals[output_tag]
            output_value = Constant(
                output_values[output_tag],
                precision=output_signal.get_precision())
            output_precision = output_signal.get_precision()
            expected_dec = output_precision.get_cst(
                output_values[output_tag], language=VHDL_Code).replace('"', "'")
            expected_hex = " / " + hex(
                output_precision.get_base_format().get_integer_coding(
                    output_values[output_tag]))
            value_msg = "{} / {}".format(expected_dec, expected_hex)

            test_pass_cond = Comparison(output_signal,
                                        output_value,
                                        specifier=Comparison.Equal,
                                        precision=ML_Bool)

            test_statement.add(
                ConditionBlock(
                    LogicalNot(test_pass_cond, precision=ML_Bool),
                    Report(
                        Concatenation(
                            " result for {}: ".format(output_tag),
                            Conversion(
                                TypeCast(
                                    output_signal,
                                    precision=ML_StdLogicVectorFormat(
                                        output_signal.get_precision(
                                        ).get_bit_size())),
                                precision=ML_String),
                            precision=ML_String))))
            test_statement.add(
                Assert(
                    test_pass_cond,
                    "\"unexpected value for inputs {input_msg}, "
                    "output {output_tag}, expecting {value_msg}, "
                    "got: \"".format(input_msg=input_msg,
                                     output_tag=output_tag,
                                     value_msg=value_msg),
                    severity=Assert.Failure))

    testbench = CodeEntity("testbench")
    test_process = Process(
        test_statement,
        # end of test
        Assert(Constant(0, precision=ML_Bool),
               " \"end of test, no error encountered \"",
               severity=Assert.Failure))

    testbench_scheme = Statement(self_instance, test_process)

    if self.pipelined:
        half_time_step = time_step / 2
        assert (half_time_step * 2) == time_step
        # adding clock process for pipelined bench
        clk_process = Process(
            Statement(
                ReferenceAssign(io_map["clk"],
                                Constant(1, precision=ML_StdLogic)),
                Wait(half_time_step),
                ReferenceAssign(io_map["clk"],
                                Constant(0, precision=ML_StdLogic)),
                Wait(half_time_step),
            ))
        testbench_scheme.push(clk_process)

    testbench.add_process(testbench_scheme)
    return [testbench]
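# Hedged sketch (illustrative names, not from the original source): the test
# generator above relies on hooks supplied by the concrete entity class, in
# particular numeric_emulate (the reference model, returning a dict keyed by
# output tags) and, optionally, standard_test_cases (a list of
# (input_values, output_values) pairs where output_values may be None and is
# then filled in by numeric_emulate).
#
#   class MyAdderEntity(...):
#       standard_test_cases = [({"x": 0, "y": 0}, None)]
#
#       def numeric_emulate(self, input_values):
#           return {"result": input_values["x"] + input_values["y"]}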
def generate_datafile_testbench(self, tc_list, io_map, input_signals,
                                output_signals, time_step,
                                test_fname="test.input"):
    """ Generate testbench with input and output data externalized in
        a data file """
    # textio function to read hexadecimal text
    def FCT_HexaRead_gen(input_format):
        legalized_input_format = input_format
        FCT_HexaRead = FunctionObject(
            "hread", [HDL_LINE, legalized_input_format], ML_Void,
            FunctionOperator("hread", void_function=True, arity=2))
        return FCT_HexaRead

    # textio function to read binary text
    FCT_Read = FunctionObject(
        "read", [HDL_LINE, ML_StdLogic], ML_Void,
        FunctionOperator("read", void_function=True, arity=2))

    input_line = Variable("input_line", precision=HDL_LINE,
                          var_type=Variable.Local)

    # building ordered list of input and output signal names
    input_signal_list = [sname for sname in input_signals.keys()]
    input_statement = Statement()
    for input_name in input_signal_list:
        input_format = input_signals[input_name].precision
        input_var = Variable(
            "v_" + input_name,
            precision=input_format,
            var_type=Variable.Local)
        if input_format is ML_StdLogic:
            input_statement.add(FCT_Read(input_line, input_var))
        else:
            input_statement.add(
                FCT_HexaRead_gen(input_format)(input_line, input_var))
        input_statement.add(ReferenceAssign(input_signals[input_name], input_var))

    output_signal_list = [sname for sname in output_signals.keys()]
    output_statement = Statement()
    for output_name in output_signal_list:
        output_format = output_signals[output_name].precision
        output_var = Variable(
            "v_" + output_name,
            precision=output_format,
            var_type=Variable.Local)
        if output_format is ML_StdLogic:
            output_statement.add(FCT_Read(input_line, output_var))
        else:
            output_statement.add(
                FCT_HexaRead_gen(output_format)(input_line, output_var))

        output_signal = output_signals[output_name]
        #value_msg = get_output_value_msg(output_signal, output_value)
        test_pass_cond, check_statement = get_output_check_statement(
            output_signal, output_name, output_var)

        input_msg = multi_Concatenation(
            *tuple(sum(
                [[" %s=" % input_tag,
                  signal_str_conversion(input_signals[input_tag],
                                        input_signals[input_tag].precision)]
                 for input_tag in input_signal_list], [])))

        output_statement.add(check_statement)
        assert_statement = Assert(
            test_pass_cond,
            multi_Concatenation(
                "unexpected value for inputs ",
                input_msg,
                " expecting :",
                signal_str_conversion(output_var, output_format),
                " got :",
                signal_str_conversion(output_signal, output_format),
                precision=ML_String
            ),
            severity=Assert.Failure
        )
        output_statement.add(assert_statement)

    self_component = self.implementation.get_component_object()
    self_instance = self_component(io_map=io_map, tag="tested_entity")
    test_statement = Statement()

    DATA_FILE_NAME = test_fname
    with open(DATA_FILE_NAME, "w") as data_file:
        # dumping column tags
        data_file.write(
            "# " + " ".join(input_signal_list + output_signal_list) + "\n")

        def get_raw_cst_string(cst_format, cst_value):
            size = int((cst_format.get_bit_size() + 3) / 4)
            return ("{:x}").format(
                cst_format.get_base_format().get_integer_coding(
                    cst_value)).zfill(size)

        for input_values, output_values in tc_list:
            # TODO; generate test data file
            cst_list = []
            for input_name in input_signal_list:
                input_value = input_values[input_name]
                input_format = input_signals[input_name].get_precision()
                cst_list.append(get_raw_cst_string(input_format, input_value))

            for output_name in output_signal_list:
                output_value = output_values[output_name]
                output_format = output_signals[output_name].get_precision()
                cst_list.append(get_raw_cst_string(output_format, output_value))
            # dumping line into file
            data_file.write(" ".join(cst_list) + "\n")

    input_stream = Variable("data_file", precision=HDL_FILE,
                            var_type=Variable.Local)
    file_status = Variable("file_status", precision=HDL_OPEN_FILE_STATUS,
                           var_type=Variable.Local)
    FCT_EndFile = FunctionObject(
        "endfile", [HDL_FILE], ML_Bool, FunctionOperator("endfile", arity=1))
    FCT_OpenFile = FunctionObject(
        "FILE_OPEN", [HDL_OPEN_FILE_STATUS, HDL_FILE, ML_String], ML_Void,
        FunctionOperator(
            "FILE_OPEN",
            arg_map={0: FO_Arg(0), 1: FO_Arg(1), 2: FO_Arg(2), 3: "READ_MODE"},
            void_function=True))
    FCT_ReadLine = FunctionObject(
        "readline", [HDL_FILE, HDL_LINE], ML_Void,
        FunctionOperator("readline", void_function=True, arity=2))

    reset_statement = self.get_reset_statement(io_map, time_step)
    OPEN_OK = Constant("OPEN_OK", precision=HDL_OPEN_FILE_STATUS)

    testbench = CodeEntity("testbench")
    test_process = Process(
        reset_statement,
        FCT_OpenFile(file_status, input_stream, DATA_FILE_NAME),
        ConditionBlock(
            Comparison(file_status, OPEN_OK, specifier=Comparison.NotEqual),
            Assert(
                Constant(0, precision=ML_Bool),
                " \"failed to open file {}\"".format(DATA_FILE_NAME),
                severity=Assert.Failure
            )
        ),
        # consume legend line
        FCT_ReadLine(input_stream, input_line),
        WhileLoop(
            LogicalNot(FCT_EndFile(input_stream)),
            Statement(
                FCT_ReadLine(input_stream, input_line),
                input_statement,
                Wait(time_step * (self.stage_num + 2)),
                output_statement,
            ),
        ),
        # end of test
        Assert(
            Constant(0, precision=ML_Bool),
            " \"end of test, no error encountered \"",
            severity=Assert.Warning
        ),
        # infinite end loop
        WhileLoop(
            Constant(1, precision=ML_Bool),
            Statement(
                Wait(time_step * (self.stage_num + 2)),
            )
        )
    )

    testbench_scheme = Statement(
        self_instance,
        test_process
    )

    if self.pipelined:
        half_time_step = time_step / 2
        assert (half_time_step * 2) == time_step
        # adding clock process for pipelined bench
        clk_process = Process(
            Statement(
                ReferenceAssign(io_map["clk"],
                                Constant(1, precision=ML_StdLogic)),
                Wait(half_time_step),
                ReferenceAssign(io_map["clk"],
                                Constant(0, precision=ML_StdLogic)),
                Wait(half_time_step),
            )
        )
        testbench_scheme.push(clk_process)

    testbench.add_process(testbench_scheme)
    return [testbench]