Example #1
  def generate_embedded_testbench(self, tc_list, io_map, input_signals, output_signals, time_step, test_fname="test.input"):
    """ Generate testbench with embedded input and output data """
    self_component = self.implementation.get_component_object()
    self_instance = self_component(io_map = io_map, tag = "tested_entity")
    test_statement = Statement()

    for index, (input_values, output_values) in enumerate(tc_list):
      test_statement.add(
          self.implement_test_case(io_map, input_values, output_signals, output_values, time_step, index=index)
      )

    reset_statement = self.get_reset_statement(io_map, time_step)

    testbench = CodeEntity("testbench")
    test_process = Process(
      reset_statement,
      test_statement,
      # end of test
      Assert(
        Constant(0, precision = ML_Bool),
        " \"end of test, no error encountered \"",
        severity = Assert.Warning
      ),
      # infinite end loop
        WhileLoop(
            Constant(1, precision=ML_Bool),
            Statement(
                Wait(time_step * (self.stage_num + 2)),
            )
        )
    )

    testbench_scheme = Statement(
      self_instance,
      test_process
    )

    if self.pipelined:
        half_time_step = time_step / 2
        assert (half_time_step * 2) == time_step
        # adding clock process for pipelined bench
        clk_process = Process(
            Statement(
                ReferenceAssign(
                    io_map["clk"],
                    Constant(1, precision = ML_StdLogic)
                ),
                Wait(half_time_step),
                ReferenceAssign(
                    io_map["clk"],
                    Constant(0, precision = ML_StdLogic)
                ),
                Wait(half_time_step),
            )
        )
        testbench_scheme.push(clk_process)

    testbench.add_process(testbench_scheme)

    return [testbench]
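
A possible call site for the method above; the generator instance gen, the signal maps and the test values below are illustrative assumptions, not part of the example. tc_list pairs a dict of input values with a dict of expected output values, both keyed by signal tag:

# hypothetical call site; io_map / input_signals / output_signals are built the
# same way as in the generate_auto_test examples further below
tc_list = [
    ({"x": 0x1, "y": 0x2}, {"result": 0x3}),
    ({"x": 0x7, "y": 0x5}, {"result": 0xc}),
]
testbench_entities = gen.generate_embedded_testbench(
    tc_list, io_map, input_signals, output_signals, time_step=10)
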
 def expand_sub_ndrange(var_range_list, kernel):
     if len(var_range_list) == 0:
         pre_expanded_kernel = expand_kernel_expr(kernel)
         expanded_kernel, statement_list = extract_placeholder(
             pre_expanded_kernel)
         expanded_statement = Statement(*tuple(statement_list))
         print("expand_ndrange: ", expanded_kernel, statement_list)
         if expanded_kernel is not None:
             # append expanded_kernel at the Statement's end once
             # every PlaceHolder's dependency has been resolved
             expanded_statement.add(expanded_kernel)
         return expanded_statement
     else:
         var_range = var_range_list.pop(0)
         scheme = Loop(
             # init statement
             ReferenceAssign(var_range.var_index, var_range.first_index),
             # exit condition
             var_range.var_index <= var_range.last_index,
             # loop body
             Statement(
                 expand_sub_ndrange(var_range_list, kernel),
                 # loop iterator increment
                 ReferenceAssign(var_range.var_index, var_range.var_index +
                                 var_range.index_step)),
         )
     return scheme
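
For intuition, the Loop nodes built in the else branch correspond to one counted loop per entry of var_range_list, with the expanded kernel statement in the innermost body. A plain-Python sketch of the equivalent control flow for a two-level range (bounds are made up):

for i in range(0, 4 + 1, 1):        # init assign / i <= last_index / i += index_step
    for j in range(0, 2 + 1, 1):
        pass                        # expanded kernel statement executes here
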
Example #3
    def generate_scheme(self):
        vx = self.implementation.add_input_variable("x", FIXED_FORMAT)
        # declaring specific interval for input variable <x>
        vx.set_interval(Interval(-1, 1))

        acc_format = ML_Custom_FixedPoint_Format(6, 58, False)

        c = Constant(2, precision=acc_format, tag="C2")

        ivx = vx
        add_ivx = Addition(
                    c,
                    Multiplication(ivx, ivx, precision=acc_format, tag="mul"),
                    precision=acc_format,
                    tag="add"
                  )
        result = add_ivx

        input_mapping = {ivx: ivx.get_precision().round_sollya_object(0.125)}
        error_eval_map = runtime_error_eval.generate_error_eval_graph(result, input_mapping)

        # dummy scheme to make functional code generation possible
        scheme = Statement()
        for node in error_eval_map:
            scheme.add(error_eval_map[node])
        scheme.add(Return(result))
        return scheme
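
As a quick sanity check of the scheme above: with the input rounded to 0.125, the exact result is 2 + 0.125 * 0.125 = 2.015625, which fits the unsigned 6.58 fixed-point accumulator format. A plain-Python check (illustration only):

x = 0.125
assert 2 + x * x == 2.015625   # 0.015625 == 2**-6, exactly representable
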
    def generate_tensor_check_loop(self, tensor_descriptors, input_tables,
                                   output_tables):
        # unpack tensor descriptors tuple
        (input_tensor_descriptor_list,
         output_tensor_descriptor_list) = tensor_descriptors
        # internal array iterator index
        vj = Variable("j", precision=ML_UInt32, var_type=Variable.Local)

        printf_error_detail_function = self.get_printf_error_detail_fct(
            output_tensor_descriptor_list[0])

        NUM_INPUT_ARRAY = len(input_tables)

        # generate the expected table for the whole multi-array
        expected_tables = self.generate_expected_table(tensor_descriptors,
                                                       input_tables)

        # global statement to list all checks
        check_statement = Statement()

        # implement check for each output tensor
        for out_id, out_td in enumerate(output_tensor_descriptor_list):
            # expected values for the (vj)-th entry of the sub-array
            expected_values = [
                TableLoad(expected_tables[out_id], vj, i)
                for i in range(self.accuracy.get_num_output_value())
            ]
            # local result for the (vj)-th entry of the sub-array
            local_result = TableLoad(output_tables[out_id], vj)

            array_len = out_td.get_bounding_size()

            if self.break_error:
                return_statement_break = Statement(
                    printf_error_detail_function(*((vj, ) + (local_result, ))),
                    self.accuracy.get_output_print_call(
                        self.function_name, expected_values))
            else:
                return_statement_break = Statement(
                    printf_error_detail_function(*((vj, ) + (local_result, ))),
                    self.accuracy.get_output_print_call(
                        self.function_name, expected_values),
                    Return(Constant(1, precision=ML_Int32)))
            check_array_loop = Loop(
                ReferenceAssign(vj, 0), vj < array_len,
                Statement(
                    ConditionBlock(
                        self.accuracy.get_output_check_test(
                            local_result, expected_values),
                        return_statement_break),
                    ReferenceAssign(vj, vj + 1),
                ))
            check_statement.add(check_array_loop)
        return check_statement
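
The loop generated above is equivalent in shape to the following plain-Python check; the equality test and the print stand in for the comparison and printf calls produced through self.accuracy (illustration only, not the generated C code):

def check_array(local_result, expected):
    for j in range(len(expected)):
        if local_result[j] != expected[j]:
            print("error at index", j, ":", local_result[j], "expected", expected[j])
            return 1    # the Return node is omitted when self.break_error is set
    return 0
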
Example #5
  def implement_test_case(self, io_map, input_values, output_signals, output_values, time_step):
      """ Implement the test case check and assertion whose I/Os values
          are described in input_values and output_values dict """
      test_statement = Statement()
      input_msg = ""
      # Adding input setting
      for input_tag in input_values:
        input_signal = io_map[input_tag]
        # FIXME: correct value generation depending on signal precision
        input_value = input_values[input_tag]
        test_statement.add(get_input_assign(input_signal, input_value))
        input_msg += get_input_msg(input_tag, input_signal, input_value)

      test_statement.add(Wait(time_step * (self.stage_num + 2)))

      # Adding output value comparison
      for output_tag in output_signals:
        output_signal = output_signals[output_tag]
        output_value = output_values[output_tag]
        output_cst_value  = Constant(output_value, precision=output_signal.get_precision())

        value_msg = get_output_value_msg(output_signal, output_value)
        test_pass_cond, check_statement = get_output_check_statement(output_signal, output_tag, output_cst_value)

        test_statement.add(check_statement)
        assert_statement = Assert(
          test_pass_cond,
          "\"unexpected value for inputs {input_msg}, output {output_tag}, expecting {value_msg}, got: \"".format(input_msg = input_msg, output_tag = output_tag, value_msg = value_msg),
          severity = Assert.Failure
        )
        test_statement.add(assert_statement)
      return test_statement
Example #6
 def get_reset_statement(self, io_map, time_step):
   reset_statement = Statement()
   if self.reset_pipeline:
       # TODO: fix pipeline register reset
       reset_value = 0 if self.negate_reset else 1
       unreset_value = 1 - reset_value
       reset_signal = io_map[self.reset_name]
       reset_statement.add(ReferenceAssign(reset_signal, Constant(reset_value, precision=ML_StdLogic)))
       # to account for synchronous reset
       reset_statement.add(Wait(time_step * 3))
       reset_statement.add(ReferenceAssign(reset_signal, Constant(unreset_value, precision=ML_StdLogic)))
       reset_statement.add(Wait(time_step * 3))
       for recirculate_signal in self.recirculate_signal_map.values():
           reset_statement.add(ReferenceAssign(io_map[recirculate_signal.get_tag()], Constant(0, precision=ML_StdLogic)))
   return reset_statement
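
With reset_pipeline set and negate_reset left False, the statement above unrolls into the following sequence inside the test process (times relative to the start of the sequence, in the same unit as time_step):

# t = 0             : reset signal driven to '1'
# t = 3 * time_step : reset signal driven back to '0'
# t = 6 * time_step : every recirculation control signal driven to '0'
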
Example #7
def generate_pipeline_stage(entity):
    """ Process a entity to generate pipeline stages required """
    retiming_map = {}
    retime_map = RetimeMap()
    output_assign_list = entity.implementation.get_output_assign()
    for output in output_assign_list:
        Log.report(
            Log.Verbose,
            "generating pipeline from output %s " % (output.get_str(depth=1)))
        retime_op(output, retime_map)
    process_statement = Statement()

    # adding stage forward process
    clk = entity.get_clk_input()
    clock_statement = Statement()
    for stage_id in sorted(retime_map.stage_forward.keys()):
        stage_statement = Statement(*tuple(
            assign for assign in retime_map.stage_forward[stage_id]))
        clock_statement.add(stage_statement)
    # To satisfy simulation / synthesis tools, we build
    # a single if-block on the clock predicate which contains all
    # the stage register assignments
    clock_block = ConditionBlock(
        LogicalAnd(Event(clk, precision=ML_Bool),
                   Comparison(clk,
                              Constant(1, precision=ML_StdLogic),
                              specifier=Comparison.Equal,
                              precision=ML_Bool),
                   precision=ML_Bool), clock_statement)
    process_statement.add(clock_block)
    pipeline_process = Process(process_statement, sensibility_list=[clk])
    for op in retime_map.pre_statement:
        pipeline_process.add_to_pre_statement(op)
    entity.implementation.add_process(pipeline_process)
    stage_num = len(retime_map.stage_forward.keys())
    #print "there are %d pipeline stages" % (stage_num)
    return stage_num
Example #8
    def generate_auto_test(self,
                           test_num=10,
                           test_range=Interval(-1.0, 1.0),
                           debug=False,
                           time_step=10):
        """ time_step: duration of a stage (in ns) """
        # instantiating the tested component
        # map of input_tag -> input_signal and output_tag -> output_signal
        io_map = {}
        # map of input_tag -> input_signal, excluding commodity signals
        # (e.g. clock and reset)
        input_signals = {}
        # map of output_tag -> output_signal
        output_signals = {}
        # excluding clock and reset signals from argument list
        # reduced_arg_list = [input_port for input_port in self.implementation.get_arg_list() if not input_port.get_tag() in ["clk", "reset"]]
        reduced_arg_list = self.implementation.get_arg_list()
        for input_port in reduced_arg_list:
            input_tag = input_port.get_tag()
            input_signal = Signal(input_tag + "_i",
                                  precision=input_port.get_precision(),
                                  var_type=Signal.Local)
            io_map[input_tag] = input_signal
            if input_tag not in ["clk", "reset"]:
                input_signals[input_tag] = input_signal
        for output_port in self.implementation.get_output_port():
            output_tag = output_port.get_tag()
            output_signal = Signal(output_tag + "_o",
                                   precision=output_port.get_precision(),
                                   var_type=Signal.Local)
            io_map[output_tag] = output_signal
            output_signals[output_tag] = output_signal

        # building list of test cases
        tc_list = []

        self_component = self.implementation.get_component_object()
        self_instance = self_component(io_map=io_map, tag="tested_entity")
        test_statement = Statement()

        # initializing random test case generator
        self.init_test_generator()

        # Appending standard test cases if required
        if self.auto_test_std:
            tc_list += self.standard_test_cases

        for i in range(test_num):
            input_values = self.generate_test_case(input_signals, io_map, i,
                                                   test_range)
            tc_list.append((input_values, None))

        def compute_results(tc):
            """ update test case with output values if required """
            input_values, output_values = tc
            if output_values is None:
                return input_values, self.numeric_emulate(input_values)
            else:
                return tc

        # filling output values
        tc_list = [compute_results(tc) for tc in tc_list]

        for input_values, output_values in tc_list:
            test_statement.add(
                self.implement_test_case(io_map, input_values, output_signals,
                                         output_values, time_step))

        testbench = CodeEntity("testbench")
        test_process = Process(
            test_statement,
            # end of test
            Assert(Constant(0, precision=ML_Bool),
                   " \"end of test, no error encountered \"",
                   severity=Assert.Failure))

        testbench_scheme = Statement(self_instance, test_process)

        if self.pipelined:
            half_time_step = time_step / 2
            assert (half_time_step * 2) == time_step
            # adding clock process for pipelined bench
            clk_process = Process(
                Statement(
                    ReferenceAssign(io_map["clk"],
                                    Constant(1, precision=ML_StdLogic)),
                    Wait(half_time_step),
                    ReferenceAssign(io_map["clk"],
                                    Constant(0, precision=ML_StdLogic)),
                    Wait(half_time_step),
                ))
            testbench_scheme.push(clk_process)

        testbench.add_process(testbench_scheme)

        return [testbench]
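
compute_results above relies on self.numeric_emulate to fill in the missing outputs. A hypothetical implementation for a simple adder entity, matching the dict-in / dict-out contract used here (the "x", "y" and "result" tags are assumptions):

    def numeric_emulate(self, input_values):
        """ map a dict of input tag -> value to a dict of output tag -> value """
        return {"result": input_values["x"] + input_values["y"]}
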
Example #9
def generate_pipeline_stage(entity,
                            reset=False,
                            recirculate=False,
                            one_process_per_stage=True):
    """ Process a entity to generate pipeline stages required """
    retiming_map = {}
    retime_map = RetimeMap()
    output_assign_list = entity.implementation.get_output_assign()
    for output in output_assign_list:
        Log.report(Log.Verbose, "generating pipeline from output {} ", output)
        retime_op(output, retime_map)
    for recirculate_stage in entity.recirculate_signal_map:
        recirculate_ctrl = entity.recirculate_signal_map[recirculate_stage]
        Log.report(Log.Verbose,
                   "generating pipeline from recirculation control signal {}",
                   recirculate_ctrl)
        retime_op(recirculate_ctrl, retime_map)

    process_statement = Statement()

    # adding stage forward process
    clk = entity.get_clk_input()
    clock_statement = Statement()
    # handle to the first clock Process (in generation order),
    # which must be the one whose pre_statement is filled with
    # the signals required to be generated outside the processes
    first_process = False
    for stage_id in sorted(retime_map.stage_forward.keys()):
        stage_statement = Statement(*tuple(
            assign for assign in retime_map.stage_forward[stage_id]))

        if reset:
            reset_statement = Statement()
            for assign in retime_map.stage_forward[stage_id]:
                target = assign.get_input(0)
                reset_value = Constant(0, precision=target.get_precision())
                reset_statement.push(ReferenceAssign(target, reset_value))

            if recirculate:
                # inserting recirculation condition
                recirculate_signal = entity.get_recirculate_signal(stage_id)
                stage_statement = ConditionBlock(
                    Comparison(
                        recirculate_signal,
                        Constant(0,
                                 precision=recirculate_signal.get_precision()),
                        specifier=Comparison.Equal,
                        precision=ML_Bool), stage_statement)

            stage_statement = ConditionBlock(
                Comparison(entity.reset_signal,
                           Constant(1, precision=ML_StdLogic),
                           specifier=Comparison.Equal,
                           precision=ML_Bool), reset_statement,
                stage_statement)

        # To satisfy simulation / synthesis tools, we build
        # a single if-block on the clock predicate per stage
        clock_block = ConditionBlock(
            LogicalAnd(Event(clk, precision=ML_Bool),
                       Comparison(clk,
                                  Constant(1, precision=ML_StdLogic),
                                  specifier=Comparison.Equal,
                                  precision=ML_Bool),
                       precision=ML_Bool), stage_statement)

        if one_process_per_stage:
            clock_process = Process(clock_block, sensibility_list=[clk])
            entity.implementation.add_process(clock_process)
            first_process = first_process or clock_process
        else:
            clock_statement.add(clock_block)
    if one_process_per_stage:
        pass
    else:
        process_statement.add(clock_statement)
        pipeline_process = Process(process_statement, sensibility_list=[clk])
        entity.implementation.add_process(pipeline_process)
        first_process = pipeline_process
    # statement that gather signals which must be pre-computed
    for op in retime_map.pre_statement:
        first_process.add_to_pre_statement(op)
    stage_num = len(retime_map.stage_forward.keys())
    #print "there are %d pipeline stages" % (stage_num)
    return stage_num
Example #10
    def get_array_test_wrapper(self,
                               test_num,
                               tested_function,
                               table_size_offset_array,
                               input_tables,
                               output_array,
                               acc_num,
                               post_statement_generator,
                               NUM_INPUT_ARRAY=1):
        """ generate a test loop for multi-array tests
             @param test_num number of elementary array tests to be executed
             @param tested_function FunctionObject to be tested
             @param table_size_offset_array ML_NewTable object containing
                    (table-size, offset) pairs for multi-array testing
             @param input_tables list of ML_NewTable objects containing multi-array test inputs
             @param output_array ML_NewTable containing multi-array test outputs
             @param post_statement_generator generator used to generate
                    a statement executed at the end of the test of one of the
                    arrays of the multi-test. It expects 6 arguments:
                    (input_tables, output_array, table_size_offset_array,
                     array_offset, array_len, test_id)
             @param acc_num variable accumulating the total number of processed array elements
        """
        test_id = Variable("test_id",
                           precision=ML_Int32,
                           var_type=Variable.Local)
        test_num_cst = Constant(test_num, precision=ML_Int32, tag="test_num")

        array_len = Variable("len",
                             precision=ML_UInt32,
                             var_type=Variable.Local)

        array_offset = TableLoad(table_size_offset_array, test_id, 1)

        def pointer_add(table_addr, offset):
            pointer_format = table_addr.get_precision_as_pointer_format()
            return Addition(table_addr, offset, precision=pointer_format)

        array_inputs = tuple(
            pointer_add(input_tables[in_id], array_offset)
            for in_id in range(NUM_INPUT_ARRAY))
        function_call = tested_function(
            *((pointer_add(output_array, array_offset), ) + array_inputs +
              (array_len, )))

        post_statement = post_statement_generator(input_tables, output_array,
                                                  table_size_offset_array,
                                                  array_offset, array_len,
                                                  test_id)

        loop_increment = 1

        test_loop = Loop(
            ReferenceAssign(test_id, Constant(0, precision=ML_Int32)),
            test_id < test_num_cst,
            Statement(
                ReferenceAssign(array_len,
                                TableLoad(table_size_offset_array, test_id,
                                          0)),
                function_call,
                post_statement,
                ReferenceAssign(
                    acc_num, acc_num +
                    Conversion(array_len, precision=acc_num.precision)),
                ReferenceAssign(test_id, test_id + loop_increment),
            ),
        )

        test_statement = Statement()

        # adding functional test_loop to test statement
        test_statement.add(test_loop)

        return test_statement
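
The generated test loop follows this shape, sketched in plain Python (the table and function objects are the ones documented above; pointer arithmetic is written as slicing for readability, so this is an illustration rather than the emitted C code):

acc_num = 0
for test_id in range(test_num):
    array_len, array_offset = table_size_offset_array[test_id]   # (size, offset) pair
    tested_function(output_array[array_offset:],
                    *(table[array_offset:] for table in input_tables),
                    array_len)
    # the per-array post_statement (e.g. result checking) runs here
    acc_num += array_len
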
Example #11
def generate_pipeline_stage(entity, reset=False, recirculate=False, one_process_per_stage=True, synchronous_reset=True, negate_reset=False):
    """ Process a entity to generate pipeline stages required to implement
        pipeline structure described by node's stage attributes.

        :param entity: input entity to pipeline
        :type entity: ML_EntityBasis
        :param reset: indicate if a reset must be generated for pipeline registers
        :type reset: bool
        :param recirculate: trigger the integration of a recirculation signal to the stage
            flopping condition
        :type recirculate: bool
        :param one_process_per_stage: forces the generation of a separate process for each
               pipeline stage (else a unique process is generated for all the stages)
        :type one_process_per_stage: bool
        :param synchronous_reset: triggers the generation of a clocked reset
        :type synchronous_reset: bool
        :param negate_reset: if set, indicates that the reset is triggered when the reset signal is 0
                            (else 1)
        :type negate_reset: bool
    """
    retiming_map = {}
    retime_map = RetimeMap()
    output_assign_list = entity.implementation.get_output_assign()
    for output in output_assign_list:
        Log.report(Log.Verbose, "generating pipeline from output {} ", output)
        retime_op(output, retime_map)
    for recirculate_stage in entity.recirculate_signal_map:
        recirculate_ctrl = entity.recirculate_signal_map[recirculate_stage]
        Log.report(Log.Verbose, "generating pipeline from recirculation control signal {}", recirculate_ctrl)
        retime_op(recirculate_ctrl, retime_map)

    process_statement = Statement()

    # adding stage forward process
    clk = entity.get_clk_input()
    clock_statement = Statement()
    global_reset_statement = Statement()


    Log.report(Log.Info, "design has {} flip-flop(s).", retime_map.register_count)

    # handle to the first clock Process (in generation order),
    # which must be the one whose pre_statement is filled with
    # the signals required to be generated outside the processes
    first_process = False
    for stage_id in sorted(retime_map.stage_forward.keys()):
        stage_statement = Statement(
            *tuple(assign for assign in retime_map.stage_forward[stage_id]))

        if reset:
            reset_statement = Statement()
            for assign in retime_map.stage_forward[stage_id]:
                target = assign.get_input(0)
                reset_value = Constant(0, precision=target.get_precision())
                reset_statement.push(ReferenceAssign(target, reset_value))

            if recirculate:
                # inserting recirculation condition
                recirculate_signal = entity.get_recirculate_signal(stage_id)
                stage_statement = ConditionBlock(
                    Comparison(
                        recirculate_signal,
                        Constant(0, precision=recirculate_signal.get_precision()),
                        specifier=Comparison.Equal,
                        precision=ML_Bool
                    ),
                    stage_statement
                )

            if synchronous_reset:
                # build a compound statement with reset and flops statement
                stage_statement = ConditionBlock(
                    Comparison(
                        entity.reset_signal,
                        Constant(0 if negate_reset else 1, precision=ML_StdLogic),
                        specifier=Comparison.Equal, precision=ML_Bool
                    ),
                    reset_statement,
                    stage_statement
                )
            else:
                # for asynchronous reset, reset is in a non-clocked statement
                # and will be added at the end of the stage to the same process as
                # the register clocking
                global_reset_statement.add(reset_statement)

        # To satisfy simulation / synthesis tools, we build
        # a single if-block on the clock predicate per stage
        clock_block = ConditionBlock(
            LogicalAnd(
                Event(clk, precision=ML_Bool),
                Comparison(
                    clk,
                    Constant(1, precision=ML_StdLogic),
                    specifier=Comparison.Equal,
                    precision=ML_Bool
                ),
                precision=ML_Bool
            ),
            stage_statement
        )

        if one_process_per_stage:
            if reset and not synchronous_reset:
                clock_block = ConditionBlock(
                    Comparison(
                        entity.reset_signal,
                        Constant(0 if negate_reset else 1, precision=ML_StdLogic),
                        specifier=Comparison.Equal, precision=ML_Bool
                    ),
                    reset_statement,
                    clock_block
                )
                clock_process = Process(clock_block, sensibility_list=[clk, entity.reset_signal])

            else:
                # no reset, or synchronous reset (already appended to clock_block)
                clock_process = Process(clock_block, sensibility_list=[clk])
            entity.implementation.add_process(clock_process)

            first_process = first_process or clock_process
        else:
            clock_statement.add(clock_block)
    if one_process_per_stage:
        # reset and clock processes were generated in each iteration of the stage loop
        pass
    else:
        process_statement.add(clock_statement)
        if synchronous_reset:
            pipeline_process = Process(process_statement, sensibility_list=[clk])
        else:
            process_statement.add(global_reset_statement)
            pipeline_process = Process(process_statement, sensibility_list=[clk, entity.reset_signal])
        entity.implementation.add_process(pipeline_process)
        first_process = pipeline_process
    # statement that gather signals which must be pre-computed
    for op in retime_map.pre_statement:
        first_process.add_to_pre_statement(op)
    stage_num = len(retime_map.stage_forward.keys())
    Log.report(Log.Info, "there are {} pipeline stage(s)", stage_num)
    return stage_num
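
A sketch of a call site for this variant (the entity object is hypothetical; the keyword names come from the signature above):

stage_num = generate_pipeline_stage(
    my_entity,
    reset=True,                  # generate reset logic for the pipeline registers
    recirculate=False,           # no stall / recirculation control
    one_process_per_stage=True,  # one clocked process per pipeline stage
    synchronous_reset=True,      # reset sampled on the clock edge
    negate_reset=False)          # reset is active high
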
Example #12
    def generate_auto_test(self,
                           test_num=10,
                           test_range=Interval(-1.0, 1.0),
                           debug=False,
                           time_step=10):
        """ time_step: duration of a stage (in ns) """
        # instantiating the tested component
        # map of input_tag -> input_signal and output_tag -> output_signal
        io_map = {}
        # map of input_tag -> input_signal, excluding commodity signals
        # (e.g. clock and reset)
        input_signals = {}
        # map of output_tag -> output_signal
        output_signals = {}
        # excluding clock and reset signals from argument list
        # reduced_arg_list = [input_port for input_port in self.implementation.get_arg_list() if not input_port.get_tag() in ["clk", "reset"]]
        reduced_arg_list = self.implementation.get_arg_list()
        for input_port in reduced_arg_list:
            input_tag = input_port.get_tag()
            input_signal = Signal(input_tag + "_i",
                                  precision=input_port.get_precision(),
                                  var_type=Signal.Local)
            io_map[input_tag] = input_signal
            if input_tag not in ["clk", "reset"]:
                input_signals[input_tag] = input_signal
        for output_port in self.implementation.get_output_port():
            output_tag = output_port.get_tag()
            output_signal = Signal(output_tag + "_o",
                                   precision=output_port.get_precision(),
                                   var_type=Signal.Local)
            io_map[output_tag] = output_signal
            output_signals[output_tag] = output_signal

        # building list of test cases
        tc_list = []

        self_component = self.implementation.get_component_object()
        self_instance = self_component(io_map=io_map, tag="tested_entity")
        test_statement = Statement()

        # initializing random test case generator
        self.init_test_generator()

        # Appending standard test cases if required
        if self.auto_test_std:
            tc_list += self.standard_test_cases

        for i in range(test_num):
            input_values = self.generate_test_case(input_signals, io_map, i,
                                                   test_range)
            tc_list.append((input_values, None))

        def compute_results(tc):
            """ update test case with output values if required """
            input_values, output_values = tc
            if output_values is None:
                return input_values, self.numeric_emulate(input_values)
            else:
                return tc

        # filling output values
        tc_list = [compute_results(tc) for tc in tc_list]

        for input_values, output_values in tc_list:
            input_msg = ""

            # Adding input setting
            for input_tag in input_values:
                input_signal = io_map[input_tag]
                # FIXME: correct value generation depending on signal precision
                input_value = input_values[input_tag]
                test_statement.add(
                    ReferenceAssign(
                        input_signal,
                        Constant(input_value,
                                 precision=input_signal.get_precision())))
                value_msg = input_signal.get_precision().get_cst(
                    input_value, language=VHDL_Code).replace('"', "'")
                value_msg += " / " + hex(input_signal.get_precision(
                ).get_base_format().get_integer_coding(input_value))
                input_msg += " {}={} ".format(input_tag, value_msg)
            test_statement.add(Wait(time_step * self.stage_num))
            # Adding output value comparison
            for output_tag in output_signals:
                output_signal = output_signals[output_tag]
                output_value = Constant(
                    output_values[output_tag],
                    precision=output_signal.get_precision())
                output_precision = output_signal.get_precision()
                expected_dec = output_precision.get_cst(
                    output_values[output_tag],
                    language=VHDL_Code).replace('"', "'")
                expected_hex = " / " + hex(
                    output_precision.get_base_format().get_integer_coding(
                        output_values[output_tag]))
                value_msg = "{} / {}".format(expected_dec, expected_hex)

                test_pass_cond = Comparison(output_signal,
                                            output_value,
                                            specifier=Comparison.Equal,
                                            precision=ML_Bool)

                test_statement.add(
                    ConditionBlock(
                        LogicalNot(test_pass_cond, precision=ML_Bool),
                        Report(
                            Concatenation(
                                " result for {}: ".format(output_tag),
                                Conversion(TypeCast(
                                    output_signal,
                                    precision=ML_StdLogicVectorFormat(
                                        output_signal.get_precision(
                                        ).get_bit_size())),
                                           precision=ML_String),
                                precision=ML_String))))
                test_statement.add(
                    Assert(
                        test_pass_cond,
                        "\"unexpected value for inputs {input_msg}, output {output_tag}, expecting {value_msg}, got: \""
                        .format(input_msg=input_msg,
                                output_tag=output_tag,
                                value_msg=value_msg),
                        severity=Assert.Failure))

        testbench = CodeEntity("testbench")
        test_process = Process(
            test_statement,
            # end of test
            Assert(Constant(0, precision=ML_Bool),
                   " \"end of test, no error encountered \"",
                   severity=Assert.Failure))

        testbench_scheme = Statement(self_instance, test_process)

        if self.pipelined:
            half_time_step = time_step / 2
            assert (half_time_step * 2) == time_step
            # adding clock process for pipelined bench
            clk_process = Process(
                Statement(
                    ReferenceAssign(io_map["clk"],
                                    Constant(1, precision=ML_StdLogic)),
                    Wait(half_time_step),
                    ReferenceAssign(io_map["clk"],
                                    Constant(0, precision=ML_StdLogic)),
                    Wait(half_time_step),
                ))
            testbench_scheme.push(clk_process)

        testbench.add_process(testbench_scheme)

        return [testbench]
Example #13
  def generate_datafile_testbench(self, tc_list, io_map, input_signals, output_signals, time_step, test_fname="test.input"):
    """ Generate testbench with input and output data externalized in
        a data file """
    # textio function to read hexadecimal text
    def FCT_HexaRead_gen(input_format):
        legalized_input_format = input_format
        FCT_HexaRead = FunctionObject("hread", [HDL_LINE, legalized_input_format], ML_Void, FunctionOperator("hread", void_function=True, arity=2))
        return FCT_HexaRead
    # textio function to read binary text
    FCT_Read = FunctionObject("read", [HDL_LINE, ML_StdLogic], ML_Void, FunctionOperator("read", void_function=True, arity=2))
    input_line = Variable("input_line", precision=HDL_LINE, var_type=Variable.Local)

    # building ordered list of input and output signal names
    input_signal_list = [sname for sname in input_signals.keys()]
    input_statement = Statement()
    for input_name in input_signal_list:
        input_format = input_signals[input_name].precision
        input_var = Variable(
            "v_" + input_name,
            precision=input_format,
            var_type=Variable.Local)
        if input_format is ML_StdLogic:
            input_statement.add(FCT_Read(input_line, input_var))
        else:
            input_statement.add(FCT_HexaRead_gen(input_format)(input_line, input_var))
        input_statement.add(ReferenceAssign(input_signals[input_name], input_var))

    output_signal_list = [sname for sname in output_signals.keys()]
    output_statement = Statement()
    for output_name in output_signal_list:
        output_format = output_signals[output_name].precision
        output_var = Variable(
            "v_" + output_name,
            precision=output_format,
            var_type=Variable.Local)
        if output_format is ML_StdLogic:
            output_statement.add(FCT_Read(input_line, output_var))
        else:
            output_statement.add(FCT_HexaRead_gen(output_format)(input_line, output_var))

        output_signal = output_signals[output_name]
        #value_msg = get_output_value_msg(output_signal, output_value)
        test_pass_cond, check_statement = get_output_check_statement(output_signal, output_name, output_var)

        input_msg = multi_Concatenation(*tuple(sum(
            [[" %s=" % input_tag,
              signal_str_conversion(input_signals[input_tag],
                                    input_signals[input_tag].precision)]
             for input_tag in input_signal_list], [])))

        output_statement.add(check_statement)
        assert_statement = Assert(
            test_pass_cond,
            multi_Concatenation(
                "unexpected value for inputs ",
                input_msg,
                " expecting :",
                signal_str_conversion(output_var, output_format),
                " got :",
                signal_str_conversion(output_signal, output_format),
               precision = ML_String
            ),
            severity=Assert.Failure
        )
        output_statement.add(assert_statement)

    self_component = self.implementation.get_component_object()
    self_instance = self_component(io_map = io_map, tag = "tested_entity")
    test_statement = Statement()

    DATA_FILE_NAME = test_fname

    with open(DATA_FILE_NAME, "w") as data_file:
        # dumping column tags
        data_file.write("# " + " ".join(input_signal_list + output_signal_list) + "\n")

        def get_raw_cst_string(cst_format, cst_value):
            size = (cst_format.get_bit_size() + 3) // 4
            return ("{:x}").format(cst_format.get_base_format().get_integer_coding(cst_value)).zfill(size)

        for input_values, output_values in tc_list:
            # TODO: generate test data file
            cst_list = []
            for input_name in input_signal_list:
                input_value = input_values[input_name]
                input_format = input_signals[input_name].get_precision()
                cst_list.append(get_raw_cst_string(input_format, input_value))

            for output_name in output_signal_list:
                output_value = output_values[output_name]
                output_format = output_signals[output_name].get_precision()
                cst_list.append(get_raw_cst_string(output_format, output_value))
            # dumping line into file
            data_file.write(" ".join(cst_list) + "\n")

    input_stream = Variable("data_file", precision=HDL_FILE, var_type=Variable.Local)
    file_status = Variable("file_status", precision=HDL_OPEN_FILE_STATUS, var_type=Variable.Local)
    FCT_EndFile = FunctionObject("endfile", [HDL_FILE], ML_Bool, FunctionOperator("endfile", arity=1)) 
    FCT_OpenFile = FunctionObject(
        "FILE_OPEN", [HDL_OPEN_FILE_STATUS, HDL_FILE, ML_String], ML_Void,
        FunctionOperator(
            "FILE_OPEN",
            arg_map={0: FO_Arg(0), 1: FO_Arg(1), 2: FO_Arg(2), 3: "READ_MODE"},
            void_function=True))
    FCT_ReadLine =  FunctionObject(
        "readline", [HDL_FILE, HDL_LINE], ML_Void,
        FunctionOperator("readline", void_function=True, arity=2))

    reset_statement = self.get_reset_statement(io_map, time_step)
    OPEN_OK = Constant("OPEN_OK", precision=HDL_OPEN_FILE_STATUS)

    testbench = CodeEntity("testbench")
    test_process = Process(
        reset_statement,
        FCT_OpenFile(file_status, input_stream, DATA_FILE_NAME),
        ConditionBlock(
            Comparison(file_status, OPEN_OK, specifier=Comparison.NotEqual),
          Assert(
            Constant(0, precision=ML_Bool),
            " \"failed to open file {}\"".format(DATA_FILE_NAME),
            severity=Assert.Failure
          )
        ),
        # consume legend line
        FCT_ReadLine(input_stream, input_line),
        WhileLoop(
            LogicalNot(FCT_EndFile(input_stream)),
            Statement(
                FCT_ReadLine(input_stream, input_line),
                input_statement,
                Wait(time_step * (self.stage_num + 2)),
                output_statement,
            ),
        ),
      # end of test
      Assert(
        Constant(0, precision = ML_Bool),
        " \"end of test, no error encountered \"",
        severity = Assert.Warning
      ),
      # infinite end loop
        WhileLoop(
            Constant(1, precision=ML_Bool),
            Statement(
                Wait(time_step * (self.stage_num + 2)),
            )
        )
    )

    testbench_scheme = Statement(
      self_instance,
      test_process
    )

    if self.pipelined:
        half_time_step = time_step / 2
        assert (half_time_step * 2) == time_step
        # adding clock process for pipelined bench
        clk_process = Process(
            Statement(
                ReferenceAssign(
                    io_map["clk"],
                    Constant(1, precision = ML_StdLogic)
                ),
                Wait(half_time_step),
                ReferenceAssign(
                    io_map["clk"],
                    Constant(0, precision = ML_StdLogic)
                ),
                Wait(half_time_step),
            )
        )
        testbench_scheme.push(clk_process)

    testbench.add_process(testbench_scheme)

    return [testbench]
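
The data file written above starts with a commented legend line followed by one line per test case, every value dumped as zero-padded hexadecimal (ceil(bit_size / 4) digits per signal, inputs first then outputs). A made-up sample for two 8-bit inputs x, y and a 9-bit output result:

# x y result
12 34 046
a0 0f 0af
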