Example #1
  def implement_test_case(self, io_map, input_values, output_signals, output_values, time_step):
      """ Implement the test case check and assertion whose I/Os values
          are described in input_values and output_values dict """
      test_statement = Statement()
      input_msg = ""
      # Adding input setting
      for input_tag in input_values:
        input_signal = io_map[input_tag]
        # FIXME: correct value generation depending on signal precision
        input_value = input_values[input_tag]
        test_statement.add(get_input_assign(input_signal, input_value))
        input_msg += get_input_msg(input_tag, input_signal, input_value)

      test_statement.add(Wait(time_step * (self.stage_num + 2)))

      # Adding output value comparison
      for output_tag in output_signals:
        output_signal = output_signals[output_tag]
        output_value = output_values[output_tag]
        output_cst_value = Constant(output_value, precision=output_signal.get_precision())

        value_msg = get_output_value_msg(output_signal, output_value)
        test_pass_cond, check_statement = get_output_check_statement(output_signal, output_tag, output_cst_value)

        test_statement.add(check_statement)
        assert_statement = Assert(
          test_pass_cond,
          "\"unexpected value for inputs {input_msg}, output {output_tag}, expecting {value_msg}, got: \"".format(input_msg = input_msg, output_tag = output_tag, value_msg = value_msg),
          severity = Assert.Failure
        )
        test_statement.add(assert_statement)
      return test_statement
Example #2
    def test_ref_assign(self):
        """ test behavior of StaticVectorizer on predicated ReferenceAssign """
        va = Variable("a")
        vb = Variable("b")
        vc = Variable("c")
        scheme = Statement(
            ReferenceAssign(va, Constant(3)),
            ConditionBlock(
                (va > vb).modify_attributes(likely=True),
                Statement(ReferenceAssign(vb, va),
                          ReferenceAssign(va, Constant(11)), Return(va)),
            ), ReferenceAssign(va, Constant(7)), Return(vb))
        vectorized_path = StaticVectorizer().extract_vectorizable_path(
            scheme, fallback_policy)

        linearized_most_likely_path = instanciate_variable(
            vectorized_path.linearized_optree,
            vectorized_path.variable_mapping)
        test_result = (isinstance(linearized_most_likely_path, Constant)
                       and linearized_most_likely_path.get_value() == 11)
        if not test_result:
            print("test UT_StaticVectorizer failure")
            print("scheme: {}".format(scheme.get_str()))
            print("linearized_most_likely_path: {}".format(
                linearized_most_likely_path))
        self.assertTrue(test_result)
Example #3
  def generate_embedded_testbench(self, tc_list, io_map, input_signals, output_signals, time_step, test_fname="test.input"):
    """ Generate testbench with embedded input and output data """
    self_component = self.implementation.get_component_object()
    self_instance = self_component(io_map = io_map, tag = "tested_entity")
    test_statement = Statement()

    for index, (input_values, output_values) in enumerate(tc_list):
      test_statement.add(
          self.implement_test_case(io_map, input_values, output_signals, output_values, time_step, index=index)
      )

    reset_statement = self.get_reset_statement(io_map, time_step)

    testbench = CodeEntity("testbench")
    test_process = Process(
      reset_statement,
      test_statement,
      # end of test
      Assert(
        Constant(0, precision = ML_Bool),
        " \"end of test, no error encountered \"",
        severity = Assert.Warning
      ),
      # infinite end loop
      WhileLoop(
        Constant(1, precision=ML_Bool),
        Statement(
          Wait(time_step * (self.stage_num + 2)),
        )
      )
    )

    testbench_scheme = Statement(
      self_instance,
      test_process
    )

    if self.pipelined:
        half_time_step = time_step / 2
        assert (half_time_step * 2) == time_step
        # adding clock process for pipelined bench
        clk_process = Process(
            Statement(
                ReferenceAssign(
                    io_map["clk"],
                    Constant(1, precision = ML_StdLogic)
                ),
                Wait(half_time_step),
                ReferenceAssign(
                    io_map["clk"],
                    Constant(0, precision = ML_StdLogic)
                ),
                Wait(half_time_step),
            )
        )
        testbench_scheme.push(clk_process)

    testbench.add_process(testbench_scheme)

    return [testbench]
Example #4
  def externalize_call(self, optree, arg_list, tag = "foo", result_format = None):
    # determining return format
    return_format = optree.get_precision() if result_format is None else result_format
    assert return_format is not None, "external call result format must be defined"
    # function_name = self.main_code_object.declare_free_function_name(tag)
    function_name = self.name_factory.declare_free_function_name(tag)

    ext_function = CodeFunction(function_name, output_format = return_format)

    # creating argument copy
    arg_map = {}
    arg_index = 0
    for arg in arg_list:
      arg_tag = arg.get_tag(default = "arg_%d" % arg_index)
      arg_index += 1
      arg_map[arg] = ext_function.add_input_variable(arg_tag, arg.get_precision())

    # copying optree while swapping arguments for variables
    optree_copy = optree.copy(copy_map = arg_map)
    # instantiating external function scheme
    if isinstance(optree, ML_ArithmeticOperation):
      function_optree = Statement(Return(optree_copy))
    else:
      function_optree = Statement(optree_copy)
    ext_function.set_scheme(function_optree)
    self.name_factory.declare_function(function_name, ext_function.get_function_object())

    return ext_function
Example #5
 def expand_sub_ndrange(var_range_list, kernel):
     if len(var_range_list) == 0:
         pre_expanded_kernel = expand_kernel_expr(kernel)
         expanded_kernel, statement_list = extract_placeholder(
             pre_expanded_kernel)
         expanded_statement = Statement(*tuple(statement_list))
         print("expand_ndrange: ", expanded_kernel, statement_list)
         if expanded_kernel is not None:
             # append expanded_kernel at the Statement's end once
             # every PlaceHolder's dependency has been resolved
             expanded_statement.add(expanded_kernel)
         return expanded_statement
     else:
         var_range = var_range_list.pop(0)
         scheme = Loop(
             # init statement
             ReferenceAssign(var_range.var_index, var_range.first_index),
             # exit condition
             var_range.var_index <= var_range.last_index,
             # loop body
             Statement(
                 expand_sub_ndrange(var_range_list, kernel),
                 # loop iterator increment
                 ReferenceAssign(var_range.var_index, var_range.var_index +
                                 var_range.index_step)),
         )
     return scheme
Example #6
def generate_function_from_optree(name_factory,
                                  optree,
                                  arg_list,
                                  tag="foo",
                                  result_format=None):
    """ Function which transform a sub-graph @p optree whose inputs are @p arg_list
        into a meta function
        @param optree operation graph to be incorporated as function boday
        @param arg_list list of @p optree's parameters to be used as function arguments
        @param name_factory engine to generate unique function name and to register function
        @param tag string to be used as seed to generate function name
        @param result_format hint to indicate function's return format (if optree is not
            an arithmetic operation (e.g. it already contains a Return node, then @p result_format
            must be used to specify the funciton return format)

        @return CodeFunction object containing the function implementation (plus the function
            would have been declared into name_factory)
        
        """
    # determining return format
    return_format = optree.get_precision() if result_format is None else result_format
    assert return_format is not None, "external call result format must be defined"
    function_name = name_factory.declare_free_function_name(tag)

    ext_function = CodeFunction(function_name, output_format=return_format)

    # creating argument copy
    arg_map = {}
    arg_index = 0
    for arg in arg_list:
        arg_tag = arg.get_tag(default="arg_%d" % arg_index)
        arg_index += 1
        arg_map[arg] = ext_function.add_input_variable(arg_tag,
                                                       arg.get_precision())

    # extracting const tables to make sure they are not duplicated
    table_set = extract_tables(optree)
    arg_map.update({table: table for table in table_set if table.const})

    # copying optree while swapping arguments for variables
    optree_copy = optree.copy(copy_map=arg_map)
    # instantiating external function scheme
    if isinstance(optree, ML_ArithmeticOperation):
        function_optree = Statement(Return(optree_copy))
    else:
        function_optree = Statement(optree_copy)
    ext_function.set_scheme(function_optree)
    name_factory.declare_function(function_name,
                                  ext_function.get_function_object())

    return ext_function
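A minimal usage sketch for the function above (hypothetical: name_factory stands for any object providing the declare_free_function_name / declare_function interface used in the body; ML_Binary32 is used as in Example #19):

# wrap the sub-graph "a + b" into a standalone meta-function
va = Variable("a", precision=ML_Binary32)
vb = Variable("b", precision=ML_Binary32)
add = Addition(va, vb, precision=ML_Binary32)
# Addition is an ML_ArithmeticOperation, so a Return node is inserted automatically
vadd_function = generate_function_from_optree(name_factory, add, [va, vb], tag="vadd")
vadd = vadd_function.get_function_object()  # usable to build FunctionCall nodes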
Example #7
    def generate_scheme(self):
        # declaring function input variable
        vx = self.implementation.add_input_variable("x", self.precision)
        vy = self.implementation.add_input_variable("y", self.precision)

        Cst0 = Constant(5, precision=self.precision)
        Cst1 = Constant(7, precision=self.precision)
        comp = Comparison(vx,
                          vy,
                          specifier=Comparison.Greater,
                          precision=ML_Bool,
                          tag="comp")
        comp_eq = Comparison(vx,
                             vy,
                             specifier=Comparison.Equal,
                             precision=ML_Bool,
                             tag="comp_eq")

        scheme = Statement(
            ConditionBlock(
                comp, Return(vy, precision=self.precision),
                ConditionBlock(
                    comp_eq,
                    Return(vx + vy * Cst0 - Cst1, precision=self.precision))),
            ConditionBlock(comp_eq, Return(Cst1 * vy,
                                           precision=self.precision)),
            Return(vx * vy, precision=self.precision))

        return scheme
Example #8
 def ExpRaiseReturn(*args, **kwords):
     kwords["arg_value"] = vx
     kwords["function_name"] = self.function_name
     if self.libm_compliant:
         return RaiseReturn(*args, **kwords)
     else:
         return Statement()
Example #9
    def generate_test_wrapper(self, tensor_descriptors, input_tables,
                              output_tables):
        auto_test = CodeFunction("test_wrapper", output_format=ML_Int32)

        tested_function = self.implementation.get_function_object()
        function_name = self.implementation.get_name()

        failure_report_op = FunctionOperator("report_failure")
        failure_report_function = FunctionObject("report_failure", [], ML_Void,
                                                 failure_report_op)

        printf_success_op = FunctionOperator(
            "printf",
            arg_map={0: "\"test successful %s\\n\"" % function_name},
            void_function=True,
            require_header=["stdio.h"])
        printf_success_function = FunctionObject("printf", [], ML_Void,
                                                 printf_success_op)

        # accumulate element number
        acc_num = Variable("acc_num",
                           precision=ML_Int64,
                           var_type=Variable.Local)

        test_loop = self.get_tensor_test_wrapper(
            tested_function, tensor_descriptors, input_tables, output_tables,
            acc_num, self.generate_tensor_check_loop)

        # common test scheme between scalar and vector functions
        test_scheme = Statement(test_loop, printf_success_function(),
                                Return(Constant(0, precision=ML_Int32)))
        auto_test.set_scheme(test_scheme)
        return FunctionGroup([auto_test])
Example #10
def simplify_condition_block(node):
    assert isinstance(node, ConditionBlock)
    cond = node.get_input(0)
    if isinstance(cond, Constant):
        if cond.get_value():
            return Statement(
                node.get_pre_statement(),
                node.get_input(1)
            )
        elif len(node.inputs) >= 3:
            return Statement(
                node.get_pre_statement(),
                node.get_input(2)
            )

    return None
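A short sketch of the folding behavior (using only node constructors that appear in these examples; a ConditionBlock whose condition is not a Constant is left untouched and None is returned):

# constant-true condition: the block collapses to its if-branch
cb = ConditionBlock(
    Constant(1, precision=ML_Bool),
    Return(Constant(0, precision=ML_Int32)))
folded = simplify_condition_block(cb)
# folded is Statement(<pre-statement>, <if-branch>)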
Example #11
    def generate_scheme(self):
        size_format = ML_Int32

        # Matrix storage
        in_storage = self.implementation.add_input_variable(
            "buffer_in", ML_Pointer_Format(self.precision))
        kernel_storage = self.implementation.add_input_variable(
            "buffer_kernel", ML_Pointer_Format(self.precision))
        out_storage = self.implementation.add_input_variable(
            "buffer_out", ML_Pointer_Format(self.precision))

        # Matrix sizes
        w = self.implementation.add_input_variable("w", size_format)
        h = self.implementation.add_input_variable("h", size_format)

        # input image is a (w x h) matrix in row-major
        tIn = Tensor(in_storage,
                     TensorDescriptor([w, h], [1, w], self.precision))
        # convolution kernel tensor, with strides derived from kernel_size
        kernel_strides = [1]
        for previous_dim in self.kernel_size[:-1]:
            kernel_strides.append(previous_dim * kernel_strides[-1])
        print("kernel_strides: {}".format(kernel_strides))
        tKernel = Tensor(
            kernel_storage,
            TensorDescriptor(self.kernel_size, kernel_strides, self.precision))
        # output image is a (w x h) matrix in row-major
        tOut = Tensor(out_storage,
                      TensorDescriptor([w, h], [1, w], self.precision))

        index_format = ML_Int32

        # main NDRange description
        i = Variable("i", precision=index_format, var_type=Variable.Local)
        j = Variable("j", precision=index_format, var_type=Variable.Local)
        k_w = Variable("k_w", precision=index_format, var_type=Variable.Local)
        k_h = Variable("k_h", precision=index_format, var_type=Variable.Local)
        result = NDRange([IterRange(i, 0, w - 1),
                          IterRange(j, 0, h - 1)],
                         WriteAccessor(
                             tOut, [i, j],
                             Sum(Sum(Multiplication(
                                 ReadAccessor(tIn, [i + k_w, j - k_h],
                                              self.precision),
                                 ReadAccessor(tKernel, [k_w, k_h],
                                              self.precision)),
                                     IterRange(k_w,
                                               -(self.kernel_size[0] - 1) // 2,
                                               (self.kernel_size[0] - 1) // 2),
                                     precision=self.precision),
                                 IterRange(k_h,
                                           -(self.kernel_size[1] - 1) // 2,
                                           (self.kernel_size[1] - 1) // 2),
                                 precision=self.precision)))

        mdl_scheme = expand_ndrange(result)
        print("mdl_scheme:\n{}".format(mdl_scheme.get_str(depth=None)))
        return Statement(mdl_scheme, Return())
Example #12
def expand_kernel_expr(kernel, iterator_format=ML_Int32):
    """ Expand a kernel expression into the corresponding MDL graph """
    if isinstance(kernel, NDRange):
        return expand_ndrange(kernel)
    elif isinstance(kernel, Sum):
        var_iter = kernel.index_iter_range.var_index
        # TODO/FIXME to be uniquified
        acc = Variable("acc",
                       var_type=Variable.Local,
                       precision=kernel.precision)
        # TODO/FIXME implement proper acc init
        if kernel.precision.is_vector_format():
            C0 = Constant([0] * kernel.precision.get_vector_size(),
                          precision=kernel.precision)
        else:
            C0 = Constant(0, precision=kernel.precision)
        scheme = Loop(
            Statement(
                ReferenceAssign(var_iter, kernel.index_iter_range.first_index),
                ReferenceAssign(acc, C0)),
            var_iter <= kernel.index_iter_range.last_index,
            Statement(
                ReferenceAssign(
                    acc,
                    Addition(acc,
                             expand_kernel_expr(kernel.elt_operation),
                             precision=kernel.precision)),
                # loop iterator increment
                ReferenceAssign(var_iter, var_iter +
                                kernel.index_iter_range.index_step)))
        return PlaceHolder(acc, scheme)
    elif isinstance(kernel, (ReadAccessor, WriteAccessor)):
        return expand_accessor(kernel)
    elif is_leaf_node(kernel):
        return kernel
    else:
        # vanilla metalibm ops are left unmodified (except
        # recursive expansion)
        for index, op in enumerate(kernel.inputs):
            new_op = expand_kernel_expr(op)
            kernel.set_input(index, new_op)
        return kernel
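For orientation, the Sum branch above builds the operation-graph equivalent of the following scalar accumulation loop (a plain-Python sketch, not metalibm code; elt(i) stands for the expanded kernel.elt_operation):

acc = 0                      # ReferenceAssign(acc, C0)
i = first_index              # ReferenceAssign(var_iter, first_index)
while i <= last_index:       # Loop exit condition
    acc = acc + elt(i)       # accumulation ReferenceAssign
    i = i + index_step       # iterator increment
# the PlaceHolder then stands for acc as the expression value, carrying the
# Loop along as a dependency until extract_placeholder (Example #5) emits it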
Example #13
    def generate_scheme(self):
        var = self.implementation.add_input_variable("x", self.precision)
        var_y = self.implementation.add_input_variable("y", self.precision)
        var_z = self.implementation.add_input_variable("z", self.precision)
        mult = Multiplication(var, var_z, precision=self.precision)
        add = Addition(var_y, mult, precision=self.precision)

        test_program = Statement(
            add,
            Return(add)
        )
        return test_program
Example #14
    def generate_scheme(self):
        vx = self.implementation.add_input_variable("x", FIXED_FORMAT)
        # declaring specific interval for input variable <x>
        vx.set_interval(Interval(-1, 1))

        acc_format = ML_Custom_FixedPoint_Format(6, 58, False)

        c = Constant(2, precision=acc_format, tag="C2")

        ivx = vx
        add_ivx = Addition(
                    c,
                    Multiplication(ivx, ivx, precision=acc_format, tag="mul"),
                    precision=acc_format,
                    tag="add"
                  )
        result = add_ivx

        input_mapping = {ivx: ivx.get_precision().round_sollya_object(0.125)}
        error_eval_map = runtime_error_eval.generate_error_eval_graph(result, input_mapping)

        # dummy scheme to make code generation functional
        scheme = Statement()
        for node in error_eval_map:
            scheme.add(error_eval_map[node])
        scheme.add(Return(result))
        return scheme
Example #15
    def generate_tensor_check_loop(self, tensor_descriptors, input_tables,
                                   output_tables):
        # unpack tensor descriptors tuple
        (input_tensor_descriptor_list,
         output_tensor_descriptor_list) = tensor_descriptors
        # internal array iterator index
        vj = Variable("j", precision=ML_UInt32, var_type=Variable.Local)

        printf_error_detail_function = self.get_printf_error_detail_fct(
            output_tensor_descriptor_list[0])

        NUM_INPUT_ARRAY = len(input_tables)

        # generate the expected table for the whole multi-array
        expected_tables = self.generate_expected_table(tensor_descriptors,
                                                       input_tables)

        # global statement to list all checks
        check_statement = Statement()

        # implement check for each output tensor
        for out_id, out_td in enumerate(output_tensor_descriptor_list):
            # expected values for the (vj)-th entry of the sub-array
            expected_values = [
                TableLoad(expected_tables[out_id], vj, i)
                for i in range(self.accuracy.get_num_output_value())
            ]
            # local result for the (vj)-th entry of the sub-array
            local_result = TableLoad(output_tables[out_id], vj)

            array_len = out_td.get_bounding_size()

            if self.break_error:
                return_statement_break = Statement(
                    printf_error_detail_function(*((vj, ) + (local_result, ))),
                    self.accuracy.get_output_print_call(
                        self.function_name, expected_values))
            else:
                return_statement_break = Statement(
                    printf_error_detail_function(*((vj, ) + (local_result, ))),
                    self.accuracy.get_output_print_call(
                        self.function_name, expected_values),
                    Return(Constant(1, precision=ML_Int32)))
            check_array_loop = Loop(
                ReferenceAssign(vj, 0), vj < array_len,
                Statement(
                    ConditionBlock(
                        self.accuracy.get_output_check_test(
                            local_result, expected_values),
                        return_statement_break),
                    ReferenceAssign(vj, vj + 1),
                ))
            check_statement.add(check_array_loop)
        return check_statement
Example #16
    def generate_scalar_scheme(self, vx, vy):
        div = Division(vx, vy, precision=self.precision)
        div_if = Trunc(div, precision=self.precision)
        rem = Variable("rem",
                       var_type=Variable.Local,
                       precision=self.precision)
        qi = Variable("qi", var_type=Variable.Local, precision=self.precision)
        qi_bound = Constant(S2**self.precision.get_mantissa_size())
        init_rem = FusedMultiplyAdd(-div_if, vy, vx)
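        # note (sketch of the underlying math): init_rem = vx - trunc(vx / vy) * vy,
        # evaluated with a single rounding by the fused multiply-add; the result is
        # exact whenever the quotient fits in the mantissa, which is why the loop
        # below keeps iterating while |qi| > 2**mantissa_size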

        # factoring out 1 / vy to save time
        # NOTE: this makes rem / vy approximate
        # shared_rcp = Division(1, vy, precision=self.precision)

        iterative_fmod = Loop(
            Statement(
                ReferenceAssign(rem, init_rem),
                ReferenceAssign(qi, div_if),
            ),
            Abs(qi) > qi_bound,
            Statement(
                ReferenceAssign(
                    qi,
                    #Trunc(shared_rcp * rem, precision=self.precision)
                    Trunc(rem / vy, precision=self.precision)),
                ReferenceAssign(rem, FMA(-qi, vy, rem))))
        scheme = Statement(
            rem,
            # shared_rcp,
            iterative_fmod,
            ConditionBlock(
                # if rem's sign and vx sign mismatch
                (rem * vx < 0.0).modify_attributes(tag="update_cond",
                                                   debug=debug_multi),
                Return(rem + vy),
                Return(rem),
            ))
        return scheme
Example #17
    def generate_scheme(self):
        size_format = ML_Int32

        # Matrix storage
        A_storage = self.implementation.add_input_variable("buffer_a", ML_Pointer_Format(self.precision))
        B_storage = self.implementation.add_input_variable("buffer_b", ML_Pointer_Format(self.precision))
        C_storage = self.implementation.add_input_variable("buffer_c", ML_Pointer_Format(self.precision))

        # Matrix sizes
        n = self.implementation.add_input_variable("n", size_format)
        m = self.implementation.add_input_variable("m", size_format)
        p = self.implementation.add_input_variable("p", size_format)


        # A is a (n x p) matrix in row-major
        tA = Tensor(A_storage, TensorDescriptor([p, n], [1, p], self.precision))
        # B is a (p x m) matrix in row-major
        tB = Tensor(B_storage, TensorDescriptor([m, p], [1, m], self.precision))
        # C is a (n x m) matrix in row-major
        tC = Tensor(C_storage, TensorDescriptor([m, n], [1, m], self.precision))

        index_format = ML_Int32

        # main NDRange description
        i = Variable("i", precision=index_format, var_type=Variable.Local)
        j = Variable("j", precision=index_format, var_type=Variable.Local)
        k = Variable("k", precision=index_format, var_type=Variable.Local)
        result = NDRange(
            [IterRange(j, 0, m - 1), IterRange(i, 0, n - 1)],
            WriteAccessor(
                tC, [j, i],
                Sum(
                    Multiplication(
                        ReadAccessor(tA, [k, i], self.precision),
                        ReadAccessor(tB, [j, k], self.precision),
                        precision=self.precision),
                    IterRange(k, 0, p - 1),
                    precision=self.precision)))

        #mdl_scheme = expand_ndrange(exchange_loop_order(tile_ndrange(result, {j: 2, i: 2}), [1, 0]))
        if self.vectorize:
            mdl_scheme = expand_ndrange(vectorize_ndrange(result, j, 4))
        else:
            mdl_scheme = expand_ndrange(exchange_loop_order(tile_ndrange(result, {j: 2, i: 2}), [1, 0]))
        print("mdl_scheme:\n{}".format(mdl_scheme.get_str(depth=None, display_precision=True)))
        return Statement(
            mdl_scheme,
            Return()
        )
Example #18
    def instanciate_graph(self,
                          op_graph,
                          memoization_map=None,
                          expand_div=False):
        """ instanciate function graph, replacing FunctionCall node
            by expanded function implementation """
        memoization_map = memoization_map or {}
        statement = Statement()

        def rec_instanciate(node):
            """ recursive internal function for function graph instanciation """
            new_node = None
            if node in memoization_map:
                return memoization_map[node]
            elif isinstance(node, FunctionCall):
                # recursively going through the input graph of FunctionCall for
                # instanciation
                for arg_index in range(node.get_function_object().arity):
                    input_node = rec_instanciate(node.get_input(arg_index))
                    if input_node is not None:
                        node.set_input(arg_index, input_node)
                result_var, fct_scheme = instanciate_fct_call(
                    node, self.precision)
                statement.add(
                    result_var
                )  # making sure result var is declared previously
                statement.add(fct_scheme)
                new_node = result_var
                new_node.set_interval(node.get_interval())
            elif isinstance(node, Division) and expand_div:
                new_node = FUNCTION_OBJECT_MAPPING["div"](node.get_input(0),
                                                          node.get_input(1))
                new_node.set_attributes(precision=node.get_precision(),
                                        interval=node.get_interval())
                new_node = rec_instanciate(new_node)
            elif is_leaf_node(node):
                # unmodified
                new_node = None
            else:
                for index, op in enumerate(node.get_inputs()):
                    new_op = rec_instanciate(op)
                    if new_op is not None:
                        node.set_input(index, new_op)
                statement.add(node)
            memoization_map[node] = new_node
            return new_node

        final_node = rec_instanciate(op_graph) or op_graph
        return final_node, statement
Example #19
    def generate_scheme(self):
        # declare a new input parameter vx whose tag is "x" and
        # whose format is single precision
        vx = self.implementation.add_input_variable("x", self.get_input_precision(0))

        # declare a new input parameter vy whose tag is "y" and
        # whose format is single precision
        vy = self.implementation.add_input_variable("y", self.get_input_precision(0))

        # declare main operation graph for the meta-function:
        # a single Statement containing a single Return node computing
        # the addition of the two input variables in single precision
        main_scheme = Statement(
            Return(vx + vy, precision=ML_Binary32)
        )
        return main_scheme
Example #20
def generate_pipeline_stage(entity):
    """ Process a entity to generate pipeline stages required """
    retiming_map = {}
    retime_map = RetimeMap()
    output_assign_list = entity.implementation.get_output_assign()
    for output in output_assign_list:
        Log.report(
            Log.Verbose,
            "generating pipeline from output %s " % (output.get_str(depth=1)))
        retime_op(output, retime_map)
    process_statement = Statement()

    # adding stage forward process
    clk = entity.get_clk_input()
    clock_statement = Statement()
    for stage_id in sorted(retime_map.stage_forward.keys()):
        stage_statement = Statement(*tuple(
            assign for assign in retime_map.stage_forward[stage_id]))
        clock_statement.add(stage_statement)
    # To satisfy simulation / synthesis tools, we build
    # a single clock-predicated if block which contains all
    # the stage register assignments
    clock_block = ConditionBlock(
        LogicalAnd(Event(clk, precision=ML_Bool),
                   Comparison(clk,
                              Constant(1, precision=ML_StdLogic),
                              specifier=Comparison.Equal,
                              precision=ML_Bool),
                   precision=ML_Bool), clock_statement)
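    # (note) Event(clk) combined with the comparison clk = '1' models VHDL's
    # rising_edge(clk): all stage registers update on the rising clock edge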
    process_statement.add(clock_block)
    pipeline_process = Process(process_statement, sensibility_list=[clk])
    for op in retime_map.pre_statement:
        pipeline_process.add_to_pre_statement(op)
    entity.implementation.add_process(pipeline_process)
    stage_num = len(retime_map.stage_forward.keys())
    #print "there are %d pipeline stages" % (stage_num)
    return stage_num
Example #21
    def get_tensor_test_wrapper(self,
                                tested_function,
                                tensor_descriptors,
                                input_tables,
                                output_tables,
                                acc_num,
                                post_statement_generator,
                                NUM_INPUT_ARRAY=1):
        """ generate a test loop for multi-array tests
             @param test_num number of elementary array tests to be executed
             @param tested_function FunctionObject to be tested
             @param table_size_offset_array ML_NewTable object containing
                    (table-size, offset) pairs for multi-array testing
             @param input_table ML_NewTable containing multi-array test inputs
             @param output_table ML_NewTable containing multi-array test outputs
             @param post_statement_generator is generator used to generate
                    a statement executed at the end of the test of one of the
                    arrays of the multi-test. It expects 6 arguments:
                    (input_tables, output_array, table_size_offset_array,
                     array_offset, array_len, test_id)
             @param printf_function FunctionObject to print error case
        """
        array_len = Variable("len",
                             precision=ML_UInt32,
                             var_type=Variable.Local)

        def pointer_add(table_addr, offset):
            pointer_format = table_addr.get_precision_as_pointer_format()
            return Addition(table_addr, offset, precision=pointer_format)

        array_inputs = tuple(input_tables[in_id]
                             for in_id in range(NUM_INPUT_ARRAY))
        function_call = tested_function(*(self.get_ordered_arg_tuple(
            tensor_descriptors, input_tables, output_tables)))

        post_statement = post_statement_generator(tensor_descriptors,
                                                  input_tables, output_tables)

        test_statement = Statement(
            function_call,
            post_statement,
        )

        return test_statement
Example #22
    def generate_emulate(self, result_ternary, result, mpfr_x, mpfr_rnd):
        """ generate the emulation code for ML_Log2 functions
            mpfr_x is a mpfr_t variable which should have the right precision
            mpfr_rnd is the rounding mode
        """
        emulate_func_name = "mpfr_exp"
        emulate_func_op = FunctionOperator(emulate_func_name,
                                           arg_map={
                                               0: FO_Arg(0),
                                               1: FO_Arg(1),
                                               2: FO_Arg(2)
                                           },
                                           require_header=["mpfr.h"])
        emulate_func = FunctionObject(emulate_func_name,
                                      [ML_Mpfr_t, ML_Mpfr_t, ML_Int32],
                                      ML_Int32, emulate_func_op)
        mpfr_call = Statement(
            ReferenceAssign(result_ternary,
                            emulate_func(result, mpfr_x, mpfr_rnd)))

        return mpfr_call
Example #23
def vectorize_function_scheme(vectorizer,
                              name_factory,
                              scalar_scheme,
                              scalar_output_format,
                              scalar_arg_list,
                              vector_size,
                              sub_vector_size=None):
    """ Use a vectorization engine @p vectorizer to vectorize the sub-graph @p
        scalar_scheme, that is transforming and inputs and outputs from scalar
        to vectors and performing required internal path duplication """

    sub_vector_size = vector_size if sub_vector_size is None else sub_vector_size

    vec_arg_list, vector_scheme, vector_mask = \
        vectorizer.vectorize_scheme(scalar_scheme, scalar_arg_list,
                                    vector_size, sub_vector_size)

    vector_output_format = vectorize_format(scalar_output_format, vector_size)

    vec_res = Variable("vec_res",
                       precision=vector_output_format,
                       var_type=Variable.Local)

    vector_mask.set_attributes(tag="vector_mask", debug=debug_multi)

    callback_name = "scalar_callback"
    scalar_callback_fct = generate_function_from_optree(
        name_factory, scalar_scheme, scalar_arg_list, callback_name,
        scalar_output_format)
    scalar_callback = scalar_callback_fct.get_function_object()

    if no_scalar_fallback_required(vector_mask):
        function_scheme = Statement(
            Return(vector_scheme, precision=vector_output_format))
    else:
        function_scheme = generate_c_vector_wrapper(
            vector_size, vec_arg_list, vector_scheme, vector_mask,
            vec_res, scalar_callback)

    return vec_res, vec_arg_list, function_scheme, scalar_callback, scalar_callback_fct
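A hedged call sketch (assumptions: vectorizer is any object exposing the vectorize_scheme method called above, e.g. a StaticVectorizer as in Example #2; name_factory, scalar_scheme, vx and vy are as in the surrounding examples):

vec_res, vec_args, scheme, scalar_cb, scalar_cb_fct = vectorize_function_scheme(
    vectorizer, name_factory, scalar_scheme,
    ML_Binary32, [vx, vy], vector_size=4)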
Example #24
def convert_bit_heap_to_fixed_point(current_bit_heap, signed=False):
    # final propagating sum
    op_index = 0
    op_list = []
    op_statement = Statement()
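    # (algorithm sketch) each pass of the loop below drains at most one bit per
    # column of the heap into a fresh std_logic_vector operand; passes repeat
    # until every column is empty, so the returned operands sum to the heap value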
    while current_bit_heap.max_count() > 0:
        op_size = current_bit_heap.max_index - current_bit_heap.min_index + 1
        op_format = ML_StdLogicVectorFormat(op_size)
        op_reduce = Signal("op_%d" % op_index,
                           precision=op_format,
                           var_type=Variable.Local)

        offset_index = current_bit_heap.min_index

        for index in range(current_bit_heap.min_index,
                           current_bit_heap.max_index + 1):
            out_index = index - offset_index
            bit_list = current_bit_heap.pop_bits(index, 1)
            if len(bit_list) == 0:
                op_statement.push(
                    ReferenceAssign(BitSelection(op_reduce, out_index),
                                    Constant(0, precision=ML_StdLogic)))
            else:
                assert len(bit_list) == 1
                op_statement.push(
                    ReferenceAssign(BitSelection(op_reduce, out_index),
                                    bit_list[0]))

        op_precision = fixed_point(op_size + offset_index,
                                   -offset_index,
                                   signed=signed)
        op_list.append(
            PlaceHolder(TypeCast(op_reduce, precision=op_precision),
                        op_statement))
        op_index += 1
    return op_list, op_statement
Example #25
    def generate_scheme(self):
        # declaring target and instantiating optimization engine
        vx = self.implementation.add_input_variable("x", self.precision)

        Log.set_dump_stdout(True)

        Log.report(Log.Info,
                   "\033[33;1m generating implementation scheme \033[0m")
        if self.debug_flag:
            Log.report(Log.Info, "\033[31;1m debug has been enabled \033[0;m")

        # local overloading of RaiseReturn operation
        def ExpRaiseReturn(*args, **kwords):
            kwords["arg_value"] = vx
            kwords["function_name"] = self.function_name
            if self.libm_compliant:
                return RaiseReturn(*args, precision=self.precision, **kwords)
            else:
                return Return(kwords["return_value"], precision=self.precision)

        test_nan_or_inf = Test(vx,
                               specifier=Test.IsInfOrNaN,
                               likely=False,
                               debug=debug_multi,
                               tag="nan_or_inf")
        test_nan = Test(vx,
                        specifier=Test.IsNaN,
                        debug=debug_multi,
                        tag="is_nan_test")
        test_positive = Comparison(vx,
                                   0,
                                   specifier=Comparison.GreaterOrEqual,
                                   debug=debug_multi,
                                   tag="inf_sign")

        test_signaling_nan = Test(vx,
                                  specifier=Test.IsSignalingNaN,
                                  debug=debug_multi,
                                  tag="is_signaling_nan")
        return_snan = Statement(
            ExpRaiseReturn(ML_FPE_Invalid,
                           return_value=FP_QNaN(self.precision)))

        # return in case of infinity input
        infty_return = Statement(
            ConditionBlock(
                test_positive,
                Return(FP_PlusInfty(self.precision), precision=self.precision),
                Return(FP_PlusZero(self.precision), precision=self.precision)))
        # return in case of specific value input (NaN or inf)
        specific_return = ConditionBlock(
            test_nan,
            ConditionBlock(
                test_signaling_nan, return_snan,
                Return(FP_QNaN(self.precision), precision=self.precision)),
            infty_return)
        # return in case of standard (non-special) input

        # exclusion of early overflow and underflow cases
        precision_emax = self.precision.get_emax()
        precision_max_value = S2 * S2**precision_emax
        exp_overflow_bound = sollya.ceil(log(precision_max_value))
        early_overflow_test = Comparison(vx,
                                         exp_overflow_bound,
                                         likely=False,
                                         specifier=Comparison.Greater)
        early_overflow_return = Statement(
            ClearException() if self.libm_compliant else Statement(),
            ExpRaiseReturn(ML_FPE_Inexact,
                           ML_FPE_Overflow,
                           return_value=FP_PlusInfty(self.precision)))

        precision_emin = self.precision.get_emin_subnormal()
        precision_min_value = S2**precision_emin
        exp_underflow_bound = floor(log(precision_min_value))

        early_underflow_test = Comparison(vx,
                                          exp_underflow_bound,
                                          likely=False,
                                          specifier=Comparison.Less)
        early_underflow_return = Statement(
            ClearException() if self.libm_compliant else Statement(),
            ExpRaiseReturn(ML_FPE_Inexact,
                           ML_FPE_Underflow,
                           return_value=FP_PlusZero(self.precision)))

        # constant computation
        invlog2 = self.precision.round_sollya_object(1 / log(2), sollya.RN)

        interval_vx = Interval(exp_underflow_bound, exp_overflow_bound)
        interval_fk = interval_vx * invlog2
        interval_k = Interval(floor(inf(interval_fk)),
                              sollya.ceil(sup(interval_fk)))

        log2_hi_precision = self.precision.get_field_size() - (
            sollya.ceil(log2(sup(abs(interval_k)))) + 2)
        Log.report(Log.Info, "log2_hi_precision: %d" % log2_hi_precision)
        invlog2_cst = Constant(invlog2, precision=self.precision)
        log2_hi = round(log(2), log2_hi_precision, sollya.RN)
        log2_lo = self.precision.round_sollya_object(
            log(2) - log2_hi, sollya.RN)

        # argument reduction
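        # (math sketch) x = k * log(2) + r with k = nearest_int(x / log(2)),
        # hence |r| <= log(2)/2 and exp(x) = 2^k * exp(r); log(2) is split as
        # log2_hi + log2_lo so that vx - k * log2_hi cancels exactly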
        unround_k = vx * invlog2
        unround_k.set_attributes(tag="unround_k", debug=debug_multi)
        k = NearestInteger(unround_k,
                           precision=self.precision,
                           debug=debug_multi)
        ik = NearestInteger(unround_k,
                            precision=self.precision.get_integer_format(),
                            debug=debug_multi,
                            tag="ik")
        ik.set_tag("ik")
        k.set_tag("k")
        exact_pre_mul = (k * log2_hi)
        exact_pre_mul.set_attributes(exact=True)
        exact_hi_part = vx - exact_pre_mul
        exact_hi_part.set_attributes(exact=True,
                                     tag="exact_hi",
                                     debug=debug_multi,
                                     prevent_optimization=True)
        exact_lo_part = -k * log2_lo
        exact_lo_part.set_attributes(tag="exact_lo",
                                     debug=debug_multi,
                                     prevent_optimization=True)
        r = exact_hi_part + exact_lo_part
        r.set_tag("r")
        r.set_attributes(debug=debug_multi)

        approx_interval = Interval(-log(2) / 2, log(2) / 2)

        approx_interval_half = approx_interval / 2
        approx_interval_split = [
            Interval(-log(2) / 2, inf(approx_interval_half)),
            approx_interval_half,
            Interval(sup(approx_interval_half),
                     log(2) / 2)
        ]

        # TODO: should be computed automatically
        exact_hi_interval = approx_interval
        exact_lo_interval = -interval_k * log2_lo

        opt_r = self.optimise_scheme(r, copy={})

        tag_map = {}
        self.opt_engine.register_nodes_by_tag(opt_r, tag_map)

        cg_eval_error_copy_map = {
            vx:
            Variable("x", precision=self.precision, interval=interval_vx),
            tag_map["k"]:
            Variable("k", interval=interval_k, precision=self.precision)
        }

        #try:
        if is_gappa_installed():
            eval_error = self.gappa_engine.get_eval_error_v2(
                self.opt_engine,
                opt_r,
                cg_eval_error_copy_map,
                gappa_filename="red_arg.g")
        else:
            eval_error = 0.0
            Log.report(Log.Warning,
                       "gappa is not installed in this environment")
        Log.report(Log.Info, "eval error: %s" % eval_error)

        local_ulp = sup(ulp(sollya.exp(approx_interval), self.precision))
        # FIXME refactor error_goal from accuracy
        Log.report(Log.Info, "accuracy: %s" % self.accuracy)
        if isinstance(self.accuracy, ML_Faithful):
            error_goal = local_ulp
        elif isinstance(self.accuracy, ML_CorrectlyRounded):
            error_goal = S2**-1 * local_ulp
        elif isinstance(self.accuracy, ML_DegradedAccuracyAbsolute):
            error_goal = self.accuracy.goal
        elif isinstance(self.accuracy, ML_DegradedAccuracyRelative):
            error_goal = self.accuracy.goal
        else:
            Log.report(Log.Error, "unknown accuracy: %s" % self.accuracy)

        # error_goal = local_ulp #S2**-(self.precision.get_field_size()+1)
        error_goal_approx = S2**-1 * error_goal

        Log.report(Log.Info,
                   "\033[33;1m building mathematical polynomial \033[0m\n")
        poly_degree = max(
            sup(
                guessdegree(
                    expm1(sollya.x) / sollya.x, approx_interval,
                    error_goal_approx)) - 1, 2)
        init_poly_degree = poly_degree

        error_function = lambda p, f, ai, mod, t: dirtyinfnorm(f - p, ai)

        polynomial_scheme_builder = PolynomialSchemeEvaluator.generate_estrin_scheme
        #polynomial_scheme_builder = PolynomialSchemeEvaluator.generate_horner_scheme

        while 1:
            Log.report(Log.Info, "attempting poly degree: %d" % poly_degree)
            precision_list = [1] + [self.precision] * (poly_degree)
            poly_object, poly_approx_error = Polynomial.build_from_approximation_with_error(
                expm1(sollya.x),
                poly_degree,
                precision_list,
                approx_interval,
                sollya.absolute,
                error_function=error_function)
            Log.report(Log.Info, "polynomial: %s " % poly_object)
            sub_poly = poly_object.sub_poly(start_index=2)
            Log.report(Log.Info, "polynomial: %s " % sub_poly)

            Log.report(Log.Info, "poly approx error: %s" % poly_approx_error)

            Log.report(
                Log.Info,
                "\033[33;1m generating polynomial evaluation scheme \033[0m")
            pre_poly = polynomial_scheme_builder(
                poly_object, r, unified_precision=self.precision)
            pre_poly.set_attributes(tag="pre_poly", debug=debug_multi)

            pre_sub_poly = polynomial_scheme_builder(
                sub_poly, r, unified_precision=self.precision)
            pre_sub_poly.set_attributes(tag="pre_sub_poly", debug=debug_multi)

            poly = 1 + (exact_hi_part + (exact_lo_part + pre_sub_poly))
            poly.set_tag("poly")

            # optimizing poly before evaluation error computation
            #opt_poly = self.opt_engine.optimization_process(poly, self.precision, fuse_fma = fuse_fma)
            #opt_sub_poly = self.opt_engine.optimization_process(pre_sub_poly, self.precision, fuse_fma = fuse_fma)
            opt_poly = self.optimise_scheme(poly)
            opt_sub_poly = self.optimise_scheme(pre_sub_poly)

            # evaluating error of the polynomial approximation
            r_gappa_var = Variable("r",
                                   precision=self.precision,
                                   interval=approx_interval)
            exact_hi_gappa_var = Variable("exact_hi",
                                          precision=self.precision,
                                          interval=exact_hi_interval)
            exact_lo_gappa_var = Variable("exact_lo",
                                          precision=self.precision,
                                          interval=exact_lo_interval)
            vx_gappa_var = Variable("x",
                                    precision=self.precision,
                                    interval=interval_vx)
            k_gappa_var = Variable("k",
                                   interval=interval_k,
                                   precision=self.precision)

            #print "exact_hi interval: ", exact_hi_interval

            sub_poly_error_copy_map = {
                #r.get_handle().get_node(): r_gappa_var,
                #vx.get_handle().get_node():  vx_gappa_var,
                exact_hi_part.get_handle().get_node():
                exact_hi_gappa_var,
                exact_lo_part.get_handle().get_node():
                exact_lo_gappa_var,
                #k.get_handle().get_node(): k_gappa_var,
            }

            poly_error_copy_map = {
                exact_hi_part.get_handle().get_node(): exact_hi_gappa_var,
                exact_lo_part.get_handle().get_node(): exact_lo_gappa_var,
            }

            if is_gappa_installed():
                sub_poly_eval_error = -1.0
                sub_poly_eval_error = self.gappa_engine.get_eval_error_v2(
                    self.opt_engine,
                    opt_sub_poly,
                    sub_poly_error_copy_map,
                    gappa_filename="%s_gappa_sub_poly.g" % self.function_name)

                dichotomy_map = [
                    {
                        exact_hi_part.get_handle().get_node():
                        approx_interval_split[0],
                    },
                    {
                        exact_hi_part.get_handle().get_node():
                        approx_interval_split[1],
                    },
                    {
                        exact_hi_part.get_handle().get_node():
                        approx_interval_split[2],
                    },
                ]
                poly_eval_error_dico = self.gappa_engine.get_eval_error_v3(
                    self.opt_engine,
                    opt_poly,
                    poly_error_copy_map,
                    gappa_filename="gappa_poly.g",
                    dichotomy=dichotomy_map)

                poly_eval_error = max(
                    [sup(abs(err)) for err in poly_eval_error_dico])
            else:
                poly_eval_error = 0.0
                sub_poly_eval_error = 0.0
                Log.report(Log.Warning,
                           "gappa is not installed in this environment")
                Log.report(Log.Info, "stopping autonomous degree search")
                # incrementing polynomial degree to counteract the initial decrement
                poly_degree += 1
                break
            Log.report(Log.Info, "poly evaluation error: %s" % poly_eval_error)
            Log.report(Log.Info,
                       "sub poly evaluation error: %s" % sub_poly_eval_error)

            global_poly_error = None
            global_rel_poly_error = None

            for case_index in range(3):
                poly_error = poly_approx_error + poly_eval_error_dico[
                    case_index]
                rel_poly_error = sup(
                    abs(poly_error /
                        sollya.exp(approx_interval_split[case_index])))
                if global_rel_poly_error is None or rel_poly_error > global_rel_poly_error:
                    global_rel_poly_error = rel_poly_error
                    global_poly_error = poly_error
            flag = error_goal > global_rel_poly_error

            if flag:
                break
            else:
                poly_degree += 1

        late_overflow_test = Comparison(ik,
                                        self.precision.get_emax(),
                                        specifier=Comparison.Greater,
                                        likely=False,
                                        debug=debug_multi,
                                        tag="late_overflow_test")
        overflow_exp_offset = (self.precision.get_emax() -
                               self.precision.get_field_size() / 2)
        diff_k = Subtraction(
            ik,
            Constant(overflow_exp_offset,
                     precision=self.precision.get_integer_format()),
            precision=self.precision.get_integer_format(),
            debug=debug_multi,
            tag="diff_k",
        )
        late_overflow_result = (ExponentInsertion(
            diff_k, precision=self.precision) * poly) * ExponentInsertion(
                overflow_exp_offset, precision=self.precision)
        late_overflow_result.set_attributes(silent=False,
                                            tag="late_overflow_result",
                                            debug=debug_multi,
                                            precision=self.precision)
        late_overflow_return = ConditionBlock(
            Test(late_overflow_result, specifier=Test.IsInfty, likely=False),
            ExpRaiseReturn(ML_FPE_Overflow,
                           return_value=FP_PlusInfty(self.precision)),
            Return(late_overflow_result, precision=self.precision))

        late_underflow_test = Comparison(k,
                                         self.precision.get_emin_normal(),
                                         specifier=Comparison.LessOrEqual,
                                         likely=False)
        underflow_exp_offset = 2 * self.precision.get_field_size()
        corrected_exp = Addition(
            ik,
            Constant(underflow_exp_offset,
                     precision=self.precision.get_integer_format()),
            precision=self.precision.get_integer_format(),
            tag="corrected_exp")
        late_underflow_result = (
            ExponentInsertion(corrected_exp, precision=self.precision) *
            poly) * ExponentInsertion(-underflow_exp_offset,
                                      precision=self.precision)
        late_underflow_result.set_attributes(debug=debug_multi,
                                             tag="late_underflow_result",
                                             silent=False)
        test_subnormal = Test(late_underflow_result,
                              specifier=Test.IsSubnormal)
        late_underflow_return = Statement(
            ConditionBlock(
                test_subnormal,
                ExpRaiseReturn(ML_FPE_Underflow,
                               return_value=late_underflow_result)),
            Return(late_underflow_result, precision=self.precision))

        twok = ExponentInsertion(ik,
                                 tag="exp_ik",
                                 debug=debug_multi,
                                 precision=self.precision)
        #std_result = twok * ((1 + exact_hi_part * pre_poly) + exact_lo_part * pre_poly)
        std_result = twok * poly
        std_result.set_attributes(tag="std_result", debug=debug_multi)
        result_scheme = ConditionBlock(
            late_overflow_test, late_overflow_return,
            ConditionBlock(late_underflow_test, late_underflow_return,
                           Return(std_result, precision=self.precision)))
        std_return = ConditionBlock(
            early_overflow_test, early_overflow_return,
            ConditionBlock(early_underflow_test, early_underflow_return,
                           result_scheme))

        # main scheme
        Log.report(Log.Info, "\033[33;1m MDL scheme \033[0m")
        scheme = ConditionBlock(
            test_nan_or_inf,
            Statement(ClearException() if self.libm_compliant else Statement(),
                      specific_return), std_return)

        return scheme
Example #26
    def generate_auto_test(self,
                           test_num=10,
                           test_range=Interval(-1.0, 1.0),
                           debug=False,
                           time_step=10):
        """ time_step: duration of a stage (in ns) """
        # instantiating tested component
        # map of input_tag -> input_signal and output_tag -> output_signal
        io_map = {}
        # map of input_tag -> input_signal, excluding commodity signals
        # (e.g. clock and reset)
        input_signals = {}
        # map of output_tag -> output_signal
        output_signals = {}
        # excluding clock and reset signals from argument list
        # reduced_arg_list = [input_port for input_port in self.implementation.get_arg_list() if not input_port.get_tag() in ["clk", "reset"]]
        reduced_arg_list = self.implementation.get_arg_list()
        for input_port in reduced_arg_list:
            input_tag = input_port.get_tag()
            input_signal = Signal(input_tag + "_i",
                                  precision=input_port.get_precision(),
                                  var_type=Signal.Local)
            io_map[input_tag] = input_signal
            if input_tag not in ["clk", "reset"]:
                input_signals[input_tag] = input_signal
        for output_port in self.implementation.get_output_port():
            output_tag = output_port.get_tag()
            output_signal = Signal(output_tag + "_o",
                                   precision=output_port.get_precision(),
                                   var_type=Signal.Local)
            io_map[output_tag] = output_signal
            output_signals[output_tag] = output_signal

        # building list of test cases
        tc_list = []

        self_component = self.implementation.get_component_object()
        self_instance = self_component(io_map=io_map, tag="tested_entity")
        test_statement = Statement()

        # initializing random test case generator
        self.init_test_generator()

        # Appending standard test cases if required
        if self.auto_test_std:
            tc_list += self.standard_test_cases

        for i in range(test_num):
            input_values = self.generate_test_case(input_signals, io_map, i,
                                                   test_range)
            tc_list.append((input_values, None))

        def compute_results(tc):
            """ update test case with output values if required """
            input_values, output_values = tc
            if output_values is None:
                return input_values, self.numeric_emulate(input_values)
            else:
                return tc

        # filling output values
        tc_list = [compute_results(tc) for tc in tc_list]

        for input_values, output_values in tc_list:
            test_statement.add(
                self.implement_test_case(io_map, input_values, output_signals,
                                         output_values, time_step))

        testbench = CodeEntity("testbench")
        test_process = Process(
            test_statement,
            # end of test
            Assert(Constant(0, precision=ML_Bool),
                   " \"end of test, no error encountered \"",
                   severity=Assert.Failure))

        testbench_scheme = Statement(self_instance, test_process)

        if self.pipelined:
            assert time_step % 2 == 0, "time_step must be even to generate the clock"
            half_time_step = time_step // 2
            # adding clock process for pipelined bench
            clk_process = Process(
                Statement(
                    ReferenceAssign(io_map["clk"],
                                    Constant(1, precision=ML_StdLogic)),
                    Wait(half_time_step),
                    ReferenceAssign(io_map["clk"],
                                    Constant(0, precision=ML_StdLogic)),
                    Wait(half_time_step),
                ))
            testbench_scheme.push(clk_process)

        testbench.add_process(testbench_scheme)

        return [testbench]
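
Note the convention in compute_results above: a test case whose output slot is None gets its expected values computed by numeric_emulate, while predefined (standard) test cases pass through unchanged. A small hypothetical illustration (signal tags invented):

tc_list = [
    ({"x": 1.0}, {"y": 2.0}),  # standard case: expected output supplied up front
    ({"x": 0.5}, None),        # random case: output filled in by numeric_emulate
]
tc_list = [compute_results(tc) for tc in tc_list]
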
Example #27
0
 def __init__(self, *args, **kw):
     Statement.__init__(self, *args, **kw)
     # indicate that the current basic block is final (i.e. it ends with
     # a Return-like statement)
     self.final = False
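
Assuming the enclosing class is a basic-block node derived from Statement (the class name below is a guess based on the comment), the flag would typically be raised once a terminator is appended:

bb = BasicBlock()                            # hypothetical subclass using the __init__ above
bb.add(ReferenceAssign(Variable("a"), Constant(3)))
bb.add(Return(Variable("a")))                # Return-like statement terminates the block
bb.final = True                              # later passes must not add a fall-through edge
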
Example #28
0
    def generate_scheme(self):
        # declaring CodeFunction and retrieving input variable
        vx = self.implementation.add_input_variable("x", self.precision)

        table_size_log = self.table_size_log
        integer_size = 31
        integer_precision = ML_Int32

        max_bound = sup(abs(self.input_intervals[0]))
        max_bound_log = int(ceil(log2(max_bound)))
        Log.report(Log.Info, "max_bound_log=%s " % max_bound_log)
        scaling_power = integer_size - max_bound_log
        Log.report(Log.Info, "scaling power: %s " % scaling_power)

        storage_precision = ML_Custom_FixedPoint_Format(1, 30, signed=True)

        Log.report(Log.Info, "tabulating cosine and sine")
        # cosine and sine fused table
        fused_table = ML_NewTable(
            dimensions=[2**table_size_log, 2],
            storage_precision=storage_precision,
            tag="fast_lib_shared_table")  # self.uniquify_name("cossin_table"))
        # filling table
        for i in range(2**table_size_log):
            local_x = i / S2**table_size_log * S2**max_bound_log

            cos_local = cos(local_x)
            # nearestint(cos(local_x) * S2**storage_precision.get_frac_size())
            sin_local = sin(local_x)
            # nearestint(sin(local_x) * S2**storage_precision.get_frac_size())

            fused_table[i][0] = cos_local
            fused_table[i][1] = sin_local

        # argument reduction evaluation scheme
        # scaling_factor = Constant(S2**scaling_power, precision = self.precision)

        red_vx_precision = ML_Custom_FixedPoint_Format(31 - scaling_power,
                                                       scaling_power,
                                                       signed=True)
        Log.report(
            Log.Verbose, "red_vx_precision.get_c_bit_size()=%d" %
            red_vx_precision.get_c_bit_size())
        # red_vx = NearestInteger(vx * scaling_factor, precision = integer_precision)
        red_vx = Conversion(vx,
                            precision=red_vx_precision,
                            tag="red_vx",
                            debug=debug_fixed32)

        computation_precision = red_vx_precision  # self.precision
        output_precision = self.get_output_precision()
        Log.report(Log.Info,
                   "computation_precision is %s" % computation_precision)
        Log.report(Log.Info, "storage_precision     is %s" % storage_precision)
        Log.report(Log.Info, "output_precision      is %s" % output_precision)

        hi_mask_value = 2**32 - 2**(32 - table_size_log - 1)
        hi_mask = Constant(hi_mask_value, precision=ML_Int32)
        Log.report(Log.Info, "hi_mask=0x%x" % hi_mask_value)

        red_vx_hi_int = BitLogicAnd(TypeCast(red_vx, precision=ML_Int32),
                                    hi_mask,
                                    precision=ML_Int32,
                                    tag="red_vx_hi_int",
                                    debug=debugd)
        red_vx_hi = TypeCast(red_vx_hi_int,
                             precision=red_vx_precision,
                             tag="red_vx_hi",
                             debug=debug_fixed32)
        red_vx_lo = red_vx - red_vx_hi
        red_vx_lo.set_attributes(precision=red_vx_precision,
                                 tag="red_vx_lo",
                                 debug=debug_fixed32)
        table_index = BitLogicRightShift(TypeCast(red_vx, precision=ML_Int32),
                                         scaling_power -
                                         (table_size_log - max_bound_log),
                                         precision=ML_Int32,
                                         tag="table_index",
                                         debug=debugd)

        tabulated_cos = TableLoad(fused_table,
                                  table_index,
                                  0,
                                  tag="tab_cos",
                                  precision=storage_precision,
                                  debug=debug_fixed32)
        tabulated_sin = TableLoad(fused_table,
                                  table_index,
                                  1,
                                  tag="tab_sin",
                                  precision=storage_precision,
                                  debug=debug_fixed32)

        error_function = lambda p, f, ai, mod, t: dirtyinfnorm(f - p, ai)

        Log.report(Log.Info, "building polynomial approximation for cosine")
        # cosine polynomial approximation
        poly_interval = Interval(0, S2**(max_bound_log - table_size_log))
        Log.report(Log.Info, "poly_interval=%s " % poly_interval)
        cos_poly_degree = 2  # int(sup(guessdegree(cos(x), poly_interval, accuracy_goal)))

        Log.report(Log.Verbose, "cosine polynomial approximation")
        cos_poly_object, cos_approx_error = Polynomial.build_from_approximation_with_error(
            cos(sollya.x), [0, 2],
            [0] + [computation_precision.get_bit_size()],
            poly_interval,
            sollya.absolute,
            error_function=error_function)
        #cos_eval_scheme = PolynomialSchemeEvaluator.generate_horner_scheme(cos_poly_object, red_vx_lo, unified_precision = computation_precision)
        Log.report(Log.Info, "cos_approx_error=%e" % cos_approx_error)
        cos_coeff_list = cos_poly_object.get_ordered_coeff_list()
        coeff_C0 = cos_coeff_list[0][1]
        coeff_C2 = Constant(cos_coeff_list[1][1],
                            precision=ML_Custom_FixedPoint_Format(-1,
                                                                  32,
                                                                  signed=True))

        Log.report(Log.Info, "building polynomial approximation for sine")

        # sine polynomial approximation
        sin_poly_degree = 2  # int(sup(guessdegree(sin(x)/x, poly_interval, accuracy_goal)))
        Log.report(Log.Info, "sine poly degree: %d" % sin_poly_degree)
        Log.report(Log.Verbose, "sine polynomial approximation")
        sin_poly_object, sin_approx_error = Polynomial.build_from_approximation_with_error(
            sin(sollya.x) / sollya.x, [0, 2], [0] +
            [computation_precision.get_bit_size()] * (sin_poly_degree + 1),
            poly_interval,
            sollya.absolute,
            error_function=error_function)
        sin_coeff_list = sin_poly_object.get_ordered_coeff_list()
        coeff_S0 = sin_coeff_list[0][1]
        coeff_S2 = Constant(sin_coeff_list[1][1],
                            precision=ML_Custom_FixedPoint_Format(-1,
                                                                  32,
                                                                  signed=True))

        # scheme selection between sine and cosine
        if self.cos_output:
            scheme = self.generate_cos_scheme(computation_precision,
                                              tabulated_cos, tabulated_sin,
                                              coeff_S2, coeff_C2, red_vx_lo)
        else:
            scheme = self.generate_sin_scheme(computation_precision,
                                              tabulated_cos, tabulated_sin,
                                              coeff_S2, coeff_C2, red_vx_lo)

        result = Conversion(scheme, precision=self.get_output_precision())

        Log.report(
            Log.Verbose, "result operation tree:\n%s" % result.get_str(
                display_precision=True, depth=None, memoization_map={}))
        scheme = Statement(Return(result))

        return scheme
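
The argument reduction above splits the fixed-point input with a single bit mask: the high part addresses the (cos, sin) table while the low part feeds the polynomials. A plain-integer sketch of the same split under the 32-bit layout used here (constants illustrative; for non-negative inputs the subtraction red_vx - red_vx_hi reduces to masking the low bits):

TABLE_SIZE_LOG = 8
HI_MASK = 2**32 - 2**(32 - TABLE_SIZE_LOG - 1)

def split_reduced_arg(red_vx_bits):
    """Split a 32-bit fixed-point pattern into table-index / residual parts."""
    hi = red_vx_bits & HI_MASK                  # top bits select the table entry
    lo = red_vx_bits & ~HI_MASK & 0xFFFFFFFF    # residual drives the polynomial
    return hi, lo
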
Example #29
0
def generate_pipeline_stage(entity,
                            reset=False,
                            recirculate=False,
                            one_process_per_stage=True):
    """ Process a entity to generate pipeline stages required """
    retime_map = RetimeMap()
    output_assign_list = entity.implementation.get_output_assign()
    for output in output_assign_list:
        Log.report(Log.Verbose, "generating pipeline from output {} ", output)
        retime_op(output, retime_map)
    for recirculate_stage in entity.recirculate_signal_map:
        recirculate_ctrl = entity.recirculate_signal_map[recirculate_stage]
        Log.report(Log.Verbose,
                   "generating pipeline from recirculation control signal {}",
                   recirculate_ctrl)
        retime_op(recirculate_ctrl, retime_map)

    process_statement = Statement()

    # adding stage forward process
    clk = entity.get_clk_input()
    clock_statement = Statement()
    # handle to the first clock Process (in generation order), which must be
    # the one whose pre_statement is filled with the signals that have to be
    # generated outside the processes
    first_process = False
    for stage_id in sorted(retime_map.stage_forward.keys()):
        stage_statement = Statement(*tuple(
            assign for assign in retime_map.stage_forward[stage_id]))

        if reset:
            reset_statement = Statement()
            for assign in retime_map.stage_forward[stage_id]:
                target = assign.get_input(0)
                reset_value = Constant(0, precision=target.get_precision())
                reset_statement.push(ReferenceAssign(target, reset_value))

            if recirculate:
                # inserting recirculation condition
                recirculate_signal = entity.get_recirculate_signal(stage_id)
                stage_statement = ConditionBlock(
                    Comparison(
                        recirculate_signal,
                        Constant(0,
                                 precision=recirculate_signal.get_precision()),
                        specifier=Comparison.Equal,
                        precision=ML_Bool), stage_statement)

            stage_statement = ConditionBlock(
                Comparison(entity.reset_signal,
                           Constant(1, precision=ML_StdLogic),
                           specifier=Comparison.Equal,
                           precision=ML_Bool), reset_statement,
                stage_statement)

        # To keep simulation / synthesis tools happy, we build
        # a single if-clock-predicate block per stage
        clock_block = ConditionBlock(
            LogicalAnd(Event(clk, precision=ML_Bool),
                       Comparison(clk,
                                  Constant(1, precision=ML_StdLogic),
                                  specifier=Comparison.Equal,
                                  precision=ML_Bool),
                       precision=ML_Bool), stage_statement)

        if one_process_per_stage:
            clock_process = Process(clock_block, sensibility_list=[clk])
            entity.implementation.add_process(clock_process)
            # keep a handle on the first clock process created
            first_process = first_process or clock_process
        else:
            clock_statement.add(clock_block)
    if not one_process_per_stage:
        process_statement.add(clock_statement)
        pipeline_process = Process(process_statement, sensibility_list=[clk])
        entity.implementation.add_process(pipeline_process)
        first_process = pipeline_process
    # statement that gather signals which must be pre-computed
    for op in retime_map.pre_statement:
        first_process.add_to_pre_statement(op)
    stage_num = len(retime_map.stage_forward)
    # print("there are %d pipeline stages" % stage_num)
    return stage_num
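
The control structure assembled above nests, from outermost to innermost: clock-edge predicate, synchronous reset, then recirculation hold. A compressed sketch of that nesting (the predicate variables are shorthand for the Comparison/Event nodes built in the loop, not Metalibm API):

# stage_statement: the registered assignments of one pipeline stage
stage = ConditionBlock(recirculate_is_zero, stage_statement)    # hold when recirculating
stage = ConditionBlock(reset_is_one, reset_statement, stage)    # synchronous reset wins
clock_block = ConditionBlock(rising_edge_of_clk, stage)         # guard on clk event
clock_process = Process(clock_block, sensibility_list=[clk])
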
Example #30
0
    def generate_scheme(self):
        # declaring function input variable
        v_x = [
            self.implementation.add_input_variable(
                "x%d" % index, self.get_input_precision(index))
            for index in range(self.arity)
        ]

        double_format = {
            ML_Binary32: ML_SingleSingle,
            ML_Binary64: ML_DoubleDouble
        }[self.precision]

        # testing Add211
        exact_add = Addition(v_x[0],
                             v_x[1],
                             precision=double_format,
                             tag="exact_add")
        # testing Mul211
        exact_mul = Multiplication(v_x[0],
                                   v_x[1],
                                   precision=double_format,
                                   tag="exact_mul")
        # testing Sub211
        exact_sub = Subtraction(v_x[1],
                                v_x[0],
                                precision=double_format,
                                tag="exact_sub")
        # testing Add222
        multi_add = Addition(exact_add,
                             exact_sub,
                             precision=double_format,
                             tag="multi_add")
        # testing Mul222
        multi_mul = Multiplication(multi_add,
                                   exact_mul,
                                   precision=double_format,
                                   tag="multi_mul")
        # testing Add221 and Add212 and Sub222
        multi_sub = Subtraction(Addition(exact_sub,
                                         v_x[1],
                                         precision=double_format,
                                         tag="add221"),
                                Addition(v_x[0],
                                         multi_mul,
                                         precision=double_format,
                                         tag="add212"),
                                precision=double_format,
                                tag="sub222")
        # testing Mul212 and Mul221
        mul212 = Multiplication(multi_sub,
                                v_x[0],
                                precision=double_format,
                                tag="mul212")
        mul221 = Multiplication(exact_mul,
                                v_x[1],
                                precision=double_format,
                                tag="mul221")
        # testing Sub221 and Sub212
        sub221 = Subtraction(mul212,
                             mul221.hi,
                             precision=double_format,
                             tag="sub221")
        sub212 = Subtraction(sub221,
                             mul212.lo,
                             precision=double_format,
                             tag="sub212")
        # testing FMA2111
        fma2111 = FMA(sub221.lo,
                      sub212.hi,
                      mul221.hi,
                      precision=double_format,
                      tag="fma2111")
        # testing FMA2112
        fma2112 = FMA(fma2111.lo,
                      fma2111.hi,
                      fma2111,
                      precision=double_format,
                      tag="fma2112")
        # testing FMA2212
        fma2212 = FMA(fma2112,
                      fma2112.hi,
                      fma2112,
                      precision=double_format,
                      tag="fma2212")
        # testing FMA2122
        fma2122 = FMA(fma2212.lo,
                      fma2212,
                      fma2212,
                      precision=double_format,
                      tag="fma2122")
        # testing FMA2222
        fma2222 = FMA(fma2122,
                      fma2212,
                      fma2111,
                      precision=double_format,
                      tag="fma2222")
        # testing Add122
        add122 = Addition(fma2222,
                          fma2222,
                          precision=self.precision,
                          tag="add122")
        # testing Add112
        add112 = Addition(add122,
                          fma2222,
                          precision=self.precision,
                          tag="add112")
        # testing Add121
        add121 = Addition(fma2222,
                          add112,
                          precision=self.precision,
                          tag="add121")
        # testing subnormalization
        multi_subnormalize = SpecificOperation(
            Addition(add121, add112, precision=double_format),
            Constant(3, precision=self.precision.get_integer_format()),
            specifier=SpecificOperation.Subnormalize,
            precision=double_format,
            tag="multi_subnormalize")
        result = Conversion(multi_subnormalize, precision=self.precision)

        scheme = Statement(Return(result))

        return scheme
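
For reference, the Add211-style exact addition exercised above produces a hi/lo pair whose sum equals the exact result of the addition. In plain Python floats this is the classic 2Sum algorithm (a sketch, not Metalibm code):

def two_sum(a, b):
    """Knuth's 2Sum: hi + lo == a + b exactly (barring overflow)."""
    hi = a + b
    b_virtual = hi - a
    a_virtual = hi - b_virtual
    lo = (a - a_virtual) + (b - b_virtual)   # exact rounding error of hi
    return hi, lo

hi, lo = two_sum(1.0, 2.0**-60)
assert (hi, lo) == (1.0, 2.0**-60)
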