Example #1
 def __init__(self, inf_bound, sup_bound):
     self.inf_bound = inf_bound
     self.sup_bound = sup_bound
     self.zero_in_interval = 0 in sollya.Interval(
         inf_bound, sup_bound)
     self.min_exp = None if self.zero_in_interval else min(
         sollya.ceil(sollya.log2(abs(inf_bound))),
         sollya.ceil(sollya.log2(abs(sup_bound))))
     self.max_exp = max(sollya.ceil(sollya.log2(abs(inf_bound))),
                        sollya.ceil(sollya.log2(abs(sup_bound))))
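A rough pure-Python sketch of the same bound computation, with math.ceil and math.log2 standing in for the sollya calls (the helper name exponent_bounds is made up for illustration and skips degenerate intervals):

import math

def exponent_bounds(inf_bound, sup_bound):
    """Sketch of the interval exponent bounds computed above."""
    zero_in_interval = inf_bound <= 0 <= sup_bound
    exps = [math.ceil(math.log2(abs(b))) for b in (inf_bound, sup_bound) if b != 0]
    min_exp = None if zero_in_interval else min(exps)
    max_exp = max(exps)
    return min_exp, max_exp

# ceil(log2(0.75)) = 0 and ceil(log2(6.0)) = 3
print(exponent_bounds(0.75, 6.0))   # (0, 3)
print(exponent_bounds(-0.5, 6.0))   # (None, 3), zero lies in the interval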
Example #2
 def get_smaller_format_min_error(self, ml_format):
     """ return the maximal accuracy / minimal error
         of the format just before @p ml_format in terms
         of size """
     MIN_ERROR_MAP = {
         ML_Binary64: -sollya.log2(
             0
         ),  # no format smaller than ML_Binary64 (switch to fp32 not managed)
         ML_DoubleDouble: S2**-53,
         ML_TripleDouble: S2**-106,
         ML_Binary32: -sollya.log2(0),  # no format smaller than ML_Binary32
         ML_SingleSingle: S2**-24,
         ML_TripleSingle: S2**-48,
     }
     return MIN_ERROR_MAP[ml_format]
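Reading the map: the entry for ML_DoubleDouble, for example, is 2**-53, since the next format down in size is plain binary64, whose 53-bit significand bounds the relative rounding error by 2**-53; the -sollya.log2(0) entries stand in for "no smaller format exists", as the comments note.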
Example #3
        def split_domain(starting_domain, slivers):
            in_domains = [starting_domain]

            out_domains = list()
            while len(in_domains) > 0:
                I = in_domains.pop()
                unround_e = sollya.log2(I)
                e_low = sollya.floor(sollya.inf(unround_e))
                e_high = sollya.floor(sollya.sup(unround_e))
                #print("in: [{}, {}] ({}, {})".format(float(sollya.inf(I)), float(sollya.sup(I)), int(e_low), int(e_high)))
                if e_low == e_high:
                    #print("  accepted")
                    out_domains.append(I)
                    continue
                e_range = sollya.Interval(e_low, e_low+1)
                I_range = 2**e_range
                for _ in range(100):
                    mid = sollya.mid(I_range)
                    e = sollya.floor(sollya.log2(mid))
                    if e == e_low:
                        I_range = sollya.Interval(mid, sollya.sup(I_range))
                    else:
                        I_range = sollya.Interval(sollya.inf(I_range), mid)

                    divider_high = sollya.sup(I_range)
                    divider_low = sollya.inf(I_range)

                lower_part = sollya.Interval(sollya.inf(I), divider_low)
                upper_part = sollya.Interval(divider_high, sollya.sup(I))
                #print("  -> [{}, {}]".format(float(sollya.inf(lower_part)), float(sollya.sup(lower_part))))
                #print("  -> [{}, {}]".format(float(sollya.inf(upper_part)), float(sollya.sup(upper_part))))
                in_domains.append(upper_part)
                in_domains.append(lower_part)
            in_domains = out_domains

            # subdivide each section into 2**slivers sections
            for _ in range(slivers):
                out_domains = list()
                for I in in_domains:
                    mid = sollya.mid(I)
                    out_domains.append(sollya.Interval(sollya.inf(I), mid))
                    out_domains.append(sollya.Interval(mid, sollya.sup(I)))
                in_domains = out_domains

            in_domains = set(in_domains)
            in_domains = sorted(in_domains, key=lambda x:float(sollya.inf(x)))
            in_domains = [d for d in in_domains if sollya.inf(d) != sollya.sup(d)]
            return in_domains
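The bisection loop above locates the power-of-two boundary inside I so that every output sub-interval stays within a single binade. A rough pure-Python equivalent of that first phase (hypothetical helper, plain floats instead of sollya intervals, exact binade boundaries instead of the 100-step bisection):

import math

def split_by_binade(lo, hi):
    """Split [lo, hi] (0 < lo <= hi) into pieces that each stay within one binade."""
    domains, pending = [], [(lo, hi)]
    while pending:
        a, b = pending.pop()
        boundary = 2.0 ** (math.floor(math.log2(a)) + 1)   # next power of two above a
        if boundary >= b:
            domains.append((a, b))      # no power of two strictly inside (a, b)
            continue
        pending.append((boundary, b))
        pending.append((a, boundary))
    return sorted(domains)

print(split_by_binade(0.75, 6.0))
# [(0.75, 1.0), (1.0, 2.0), (2.0, 4.0), (4.0, 6.0)]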
Example #4
    def get_lzc_output_width(width):
        """ Compute the size of a standard leading zero count result for
        a width-bit output

        @param width [int] input width
        @return output width (in bits) """
        return int(floor(log2(width))) + 1
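As a quick check of the formula (a width-bit word can have anywhere from 0 to width leading zeros, and the count value width itself needs floor(log2(width)) + 1 bits):

from math import floor, log2

for width in (24, 32, 64):
    print(width, int(floor(log2(width))) + 1)
# 24 -> 5 bits, 32 -> 6 bits, 64 -> 7 bits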
Example #5
 def get_integer_coding(self, value, language=C_Code):
     if FP_SpecialValue.is_special_value(value):
         return self.get_special_value_coding(value, language)
     elif value == ml_infty:
         return self.get_special_value_coding(FP_PlusInfty(self), language)
     elif value == -ml_infty:
         return self.get_special_value_coding(FP_MinusInfty(self), language)
     else:
         value = sollya.round(value, self.get_sollya_object(), sollya.RN)
         # FIXME: managing negative zero
         sign = int(1 if value < 0 else 0)
         value = abs(value)
         if value == 0.0:
             Log.report(Log.Warning,
                        "+0.0 forced during get_integer_coding conversion")
             exp_biased = 0
             mant = 0
         else:
             exp = int(sollya.floor(sollya.log2(value)))
             exp_biased = int(exp - self.get_bias())
             if exp < self.get_emin_normal():
                 exp_biased = 0
                 mant = int((value / S2**self.get_emin_subnormal()))
             else:
                 mant = int(
                     (value / S2**exp - 1.0) * (S2**self.get_field_size()))
         return mant | (exp_biased << self.get_field_size()) | (
             sign << (self.get_field_size() + self.get_exponent_size()))
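For comparison, a self-contained sketch of the same sign/exponent/mantissa packing, hard-coded for binary32 and checked against struct; it only covers positive, normal, exactly representable values and skips the rounding, subnormal and special-value handling of the method above:

import math
import struct

def binary32_coding(value):
    """Pack a positive, exactly-representable normal float into binary32 bits."""
    sign = 1 if value < 0 else 0
    value = abs(value)
    exp = int(math.floor(math.log2(value)))
    exp_biased = exp + 127                          # IEEE-754 binary32 exponent bias
    mant = int((value / 2.0**exp - 1.0) * 2**23)    # 23-bit fraction field
    return mant | (exp_biased << 23) | (sign << 31)

x = 1.5
assert binary32_coding(x) == struct.unpack(">I", struct.pack(">f", x))[0]
print(hex(binary32_coding(x)))   # 0x3fc00000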
Example #6
def solve_format_Constant(optree):
    """ Legalize Constant node """
    assert isinstance(optree, Constant)
    value = optree.get_value()
    if FP_SpecialValue.is_special_value(value):
        return optree.get_precision()
    elif not optree.get_precision() is None:
        # if precision is already set (manually forced), returns it
        return optree.get_precision()
    else:
        # fixed-point format solving
        frac_size = -1
        FRAC_THRESHOLD = 100 # maximum number of frac bit to be tested
        # TODO: fix
        for i in range(FRAC_THRESHOLD):
            if int(value*2**i) == value * 2**i:
                frac_size = i
                break
        if frac_size < 0:
            Log.report(Log.Error, "value {} is not an integer, from node:\n{}", value, optree)
        abs_value = abs(value)
        signed = value < 0
        # int_size = max(int(sollya.ceil(sollya.log2(abs_value+2**frac_size))), 0) + (1 if signed else 0)
        int_size = max(int(sollya.ceil(sollya.log2(abs_value + 1))), 0) + (1 if signed else 0)
        if frac_size == 0 and int_size == 0:
            int_size = 1
        return fixed_point(int_size, frac_size, signed=signed)
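A worked instance of that fixed-point search in plain Python (minimal_fixed_format is a hypothetical stand-in; the error path is omitted): for value = 6.25 the smallest i with value * 2**i an integer is 2, and ceil(log2(6.25 + 1)) = 3, so the value fits an unsigned format with 3 integer bits and 2 fractional bits.

import math

def minimal_fixed_format(value):
    frac_size = next(i for i in range(100) if int(value * 2**i) == value * 2**i)
    signed = value < 0
    int_size = max(int(math.ceil(math.log2(abs(value) + 1))), 0) + (1 if signed else 0)
    if frac_size == 0 and int_size == 0:
        int_size = 1
    return int_size, frac_size, signed

print(minimal_fixed_format(6.25))   # (3, 2, False)
print(minimal_fixed_format(-3.0))   # (3, 0, True)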
Example #7
def solve_format_CLZ(optree):
    """ Legalize CountLeadingZeros precision
    
        Args:
            optree (CountLeadingZeros): input node
            
        Returns:
            ML_Format: legal format for CLZ
    """
    assert isinstance(optree, CountLeadingZeros)
    op_input = optree.get_input(0)
    input_precision = op_input.get_precision()

    if is_fixed_point(input_precision):
        if input_precision.get_signed():
            Log.report(Log.Warning , "signed format in solve_format_CLZ")
        # +1 for carry overflow
        int_size = int(sollya.floor(sollya.log2(input_precision.get_bit_size()))) + 1 
        frac_size = 0
        return fixed_point(
            int_size,
            frac_size,
            signed=False
        )
    else:
        Log.report(Log.Warning , "unsupported format in solve_format_CLZ")
        return optree.get_precision()
Example #8
File: rootn.py Project: metalibm/metalibm
 def numeric_emulate(self, vx, n):
     """ Numeric emulation of n-th root """
     if FP_SpecialValue.is_special_value(vx):
         if is_nan(vx):
             return FP_QNaN(self.precision)
         elif is_plus_infty(vx):
             return SOLLYA_INFTY
         elif is_minus_infty(vx):
             if int(n) % 2 == 1:
                 return vx
             else:
                 return FP_QNaN(self.precision)
         elif is_zero(vx):
             if int(n) % 2 != 0 and n < 0:
                 if is_plus_zero(vx):
                     return FP_PlusInfty(self.precision)
                 else:
                     return FP_MinusInfty(self.precision)
             elif int(n) % 2 == 0:
                 if n < 0:
                     return FP_PlusInfty(self.precision)
                 elif n > 0:
                     return FP_PlusZero(self.precision)
             return FP_QNaN(self.precision)
         else:
             raise NotImplementedError
     # OpenCL-C rootn, x < 0 and y odd: -exp2(log2(-x) / y)
     S2 = sollya.SollyaObject(2)
     if vx < 0:
         if int(n) % 2 != 0:
             if n > 0:
                 v = -bigfloat.root(
                     sollya.SollyaObject(-vx).bigfloat(), int(n))
             else:
                 v = -S2**(sollya.log2(-vx) / n)
         else:
             return FP_QNaN(self.precision)
     elif n < 0:
         # OpenCL-C definition
         v = S2**(sollya.log2(vx) / n)
     else:
         v = bigfloat.root(sollya.SollyaObject(vx).bigfloat(), int(n))
     return sollya.SollyaObject(v)
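The 2**(log2(x)/n) identity used in the OpenCL-C branches above is easy to sanity-check in plain Python (positive x only, illustrative):

import math

def rootn(x, n):
    # mirrors the v = S2**(sollya.log2(vx) / n) branch above, for x > 0
    return 2.0 ** (math.log2(x) / n)

print(rootn(8.0, 3), rootn(16.0, -2))   # 2.0 0.25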
Example #9
def get_accuracy_from_epsilon(epsilon):
    """ convert a numerical relative error into
        a number of accuracy bits

        :param epsilon: error to convert
        :type epsilon: number
        :return: accuracy corresponding to the error
        :rtype: SollyaObject
    """
    return sollya.floor(-sollya.log2(abs(epsilon)))
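Equivalently, in plain Python (floor of -log2 of the relative error):

import math

def accuracy_from_epsilon(epsilon):
    return math.floor(-math.log2(abs(epsilon)))

print(accuracy_from_epsilon(2**-24))    # 24
print(accuracy_from_epsilon(1.5e-7))    # 22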
Example #10
def get_fixed_type_from_interval(interval, precision):
    """ generate a fixed-point format which can encode
        @p interval without overflow, and which spans
        @p precision bits """
    lo = inf(interval)
    hi = sup(interval)
    signed = True if lo < 0 else False
    msb_index = int(floor(sollya.log2(max(abs(lo), abs(hi))))) + 1
    extra_digit = 1 if signed else 0
    return fixed_point(msb_index + extra_digit,
                       -(msb_index - precision),
                       signed=signed)
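A worked instance with plain floats (hypothetical helper that returns the format parameters instead of building the format): for the interval [-3.5, 10.2] and precision = 16, the MSB index is floor(log2(10.2)) + 1 = 4, plus one extra digit for the sign, and 16 - 4 = 12 fractional bits.

import math

def fixed_format_params(lo, hi, precision):
    signed = lo < 0
    msb_index = int(math.floor(math.log2(max(abs(lo), abs(hi))))) + 1
    return msb_index + (1 if signed else 0), -(msb_index - precision), signed

print(fixed_format_params(-3.5, 10.2, 16))   # (5, 12, True)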
Example #11
    def computeBoundPower(self, k, out_format, var_format):
        # TODO: fix
        epsilon = out_format.mp_node.epsilon
        eps_target = out_format.eps_target
        # to avoid drifting to larger and larger formats when post-processing
        # powerings, we over-estimate the error while matching eps_target
        if eps_target > epsilon and epsilon > 0:
            # limiting error to limit precision explosion
            l_eps_target = sollya.log2(eps_target)
            l_epsilon = sollya.log2(epsilon)

            virtual_error_log = (l_eps_target + l_epsilon) / 2.0
            virtual_error = sollya.evaluate(
                sollya.SollyaObject(S2**(virtual_error_log)), 1)
            print(
                "lying on power_error target=2^{}, epsilon=2^{}, virtual_error=2^{} / {}"
                .format(l_eps_target, l_epsilon, virtual_error_log,
                        virtual_error))
            return virtual_error
        else:
            return sollya.SollyaObject(epsilon)
Example #12
 def __init__(self, low_exp_value, max_exp_value, field_bits, precision):
     self.field_bits = field_bits
     self.low_exp_value = low_exp_value
     self.max_exp_value = max_exp_value
     exp_bits = int(
         sollya.ceil(sollya.log2(max_exp_value - low_exp_value + 1)))
     assert exp_bits >= 0 and field_bits >= 0 and (exp_bits +
                                                   field_bits) > 0
     self.exp_bits = exp_bits
     self.split_num = (self.max_exp_value - self.low_exp_value +
                       1) * 2**(self.field_bits)
     Log.report(Log.Debug, "split_num={}", self.split_num)
     self.precision = precision
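For example (illustrative values): with low_exp_value = -4, max_exp_value = 3 and field_bits = 2, the constructor above gives exp_bits = ceil(log2(3 - (-4) + 1)) = ceil(log2(8)) = 3 and split_num = 8 * 2**2 = 32 sub-intervals.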
Example #13
    def generate_scheme(self):
        lzc_width = int(floor(log2(self.width))) + 1
        Log.report(Log.Info, "width of lzc out is {}".format(lzc_width))
        input_precision = ML_StdLogicVectorFormat(self.width)
        precision = ML_StdLogicVectorFormat(lzc_width)
        # declaring main input variable
        vx = self.implementation.add_input_signal("x", input_precision)
        vr_out = Signal("lzc", precision=precision, var_type=Variable.Local)
        tmp_lzc = Variable("tmp_lzc",
                           precision=precision,
                           var_type=Variable.Local)
        iterator = Variable("i", precision=ML_Integer, var_type=Variable.Local)
        lzc_loop = RangeLoop(
            iterator,
            Interval(0, self.width - 1),
            ConditionBlock(
                Comparison(VectorElementSelection(vx,
                                                  iterator,
                                                  precision=ML_StdLogic),
                           Constant(1, precision=ML_StdLogic),
                           specifier=Comparison.Equal,
                           precision=ML_Bool),
                ReferenceAssign(
                    tmp_lzc,
                    Conversion(Subtraction(Constant(self.width - 1,
                                                    precision=ML_Integer),
                                           iterator,
                                           precision=ML_Integer),
                               precision=precision),
                )),
            specifier=RangeLoop.Increasing,
        )
        lzc_process = Process(Statement(
            ReferenceAssign(tmp_lzc, Constant(self.width,
                                              precision=precision)), lzc_loop,
            ReferenceAssign(vr_out, tmp_lzc)),
                              sensibility_list=[vx])

        self.implementation.add_process(lzc_process)

        self.implementation.add_output_signal("vr_out", vr_out)

        return [self.implementation]
Example #14
def solve_format_Constant(optree):
    """ Legalize Constant node """
    assert isinstance(optree, Constant)
    value = optree.get_value()
    if FP_SpecialValue.is_special_value(value):
        return optree.get_precision()
    elif not optree.get_precision() is None:
        # if precision is already set (manually forced), returns it
        return optree.get_precision()
    else:
        # fixed-point format solving
        assert int(value) == value
        abs_value = abs(value)
        signed = value < 0
        int_size = max(int(sollya.ceil(sollya.log2(abs_value + 1))),
                       0) + (1 if signed else 0)
        frac_size = 0
        if frac_size == 0 and int_size == 0:
            int_size = 1
        return fixed_point(int_size, frac_size, signed=signed)
Example #15
 def computeNeededVariableFormat(self, I, epsTarget, variableFormat):
     if epsTarget > 0:
         # TODO: fix to support ML_Binary32
         if epsTarget >= self.MIN_LIMB_ERROR or variableFormat.mp_node.precision is self.limb_format:
             # FIXME: default to minimal precision (self.limb_format)
             return variableFormat
         else:
             target_accuracy = sollya.ceil(-sollya.log2(epsTarget))
             target_format = self.get_format_from_accuracy(
                 target_accuracy,
                 eps_target=epsTarget,
                 interval=variableFormat.mp_node.interval)
             if target_format.mp_node.precision.get_bit_size(
             ) < variableFormat.mp_node.precision.get_bit_size():
                 return target_format
             else:
                 # if variableFormat is smaller (less bits) and more accurate
                 # then we use it
                 return variableFormat
     else:
         return variableFormat
Example #16
def determine_minimal_fixed_format_cst(value):
    """ determine the minimal size format which can encode
        exactly the constant value value """
    # fixed-point format solving
    frac_size = -1
    FRAC_THRESHOLD = 100  # maximum number of frac bit to be tested
    # TODO: fix
    for i in range(FRAC_THRESHOLD):
        if int(value * 2**i) == value * 2**i:
            frac_size = i
            break
    if frac_size < 0:
        Log.report(Log.Error, "value {} is not an integer", value)
    abs_value = abs(value)
    signed = value < 0
    # int_size = max(int(sollya.ceil(sollya.log2(abs_value+2**frac_size))), 0) + (1 if signed else 0)
    int_size = max(int(sollya.ceil(sollya.log2(abs_value + 1))),
                   0) + (1 if signed else 0)
    if frac_size == 0 and int_size == 0:
        int_size = 1
    return fixed_point(int_size, frac_size, signed=signed)
Example #17
    def generate_scheme(self):
        vx = self.implementation.add_input_variable("x",
                                                    self.get_input_precision())

        sollya_precision = self.get_input_precision().get_sollya_object()

        # local overloading of RaiseReturn operation
        def ExpRaiseReturn(*args, **kwords):
            kwords["arg_value"] = vx
            kwords["function_name"] = self.function_name
            return RaiseReturn(*args, **kwords)

        # testing special value inputs
        test_nan_or_inf = Test(vx,
                               specifier=Test.IsInfOrNaN,
                               likely=False,
                               debug=True,
                               tag="nan_or_inf")
        test_nan = Test(vx,
                        specifier=Test.IsNaN,
                        debug=True,
                        tag="is_nan_test")
        test_positive = Comparison(vx,
                                   0,
                                   specifier=Comparison.GreaterOrEqual,
                                   debug=True,
                                   tag="inf_sign")
        test_signaling_nan = Test(vx,
                                  specifier=Test.IsSignalingNaN,
                                  debug=True,
                                  tag="is_signaling_nan")
        # if input is a signaling NaN, raise an invalid exception and return
        # a quiet NaN
        return_snan = Statement(
            ExpRaiseReturn(ML_FPE_Invalid,
                           return_value=FP_QNaN(self.precision)))

        vx_exp = ExponentExtraction(vx, tag="vx_exp", debug=debugd)

        int_precision = self.precision.get_integer_format()

        # log2(vx)
        # r = vx_mant
        # e = vx_exp
        # vx reduced to r in [1, 2[
        # log2(vx) = log2(r * 2^e)
        #          = log2(r) + e
        #
        ## log2(r) is approximated by
        #  log2(r) = log2(inv_seed(r) * r / inv_seed(r))
        #          = log2(inv_seed(r) * r) - log2(inv_seed(r))
        # inv_seed(r) in ]1/2, 1] => log2(inv_seed(r)) in ]-1, 0]
        #
        # inv_seed(r) * r ~ 1
        # we can easily tabulate -log2(inv_seed(r))
        #

        # retrieving processor inverse approximation table
        dummy_var = Variable("dummy", precision=self.precision)
        dummy_div_seed = DivisionSeed(dummy_var, precision=self.precision)
        inv_approx_table = self.processor.get_recursive_implementation(
            dummy_div_seed,
            language=None,
            table_getter=lambda self: self.approx_table_map)
        # table creation
        table_index_size = 7
        log_table = ML_NewTable(dimensions=[2**table_index_size, 2],
                                storage_precision=self.precision,
                                tag=self.uniquify_name("inv_table"))
        # value for index 0 is set to 0.0
        log_table[0][0] = 0.0
        log_table[0][1] = 0.0
        for i in range(1, 2**table_index_size):
            #inv_value = (1.0 + (self.processor.inv_approx_table[i] / S2**9) + S2**-52) * S2**-1
            #inv_value = (1.0 + (inv_approx_table[i][0] / S2**9) ) * S2**-1
            #print inv_approx_table[i][0], inv_value
            inv_value = inv_approx_table[i][0]
            value_high_bitsize = self.precision.get_field_size() - (
                self.precision.get_exponent_size() + 1)
            value_high = round(log2(inv_value), value_high_bitsize, sollya.RN)
            value_low = round(
                log2(inv_value) - value_high, sollya_precision, sollya.RN)
            log_table[i][0] = value_high
            log_table[i][1] = value_low

        def compute_log(_vx, exp_corr_factor=None):
            _vx_mant = MantissaExtraction(_vx,
                                          tag="_vx_mant",
                                          precision=self.precision,
                                          debug=debug_lftolx)
            _vx_exp = ExponentExtraction(_vx, tag="_vx_exp", debug=debugd)

            # The main table is indexed by the 7 most significant bits
            # of the mantissa
            table_index = inv_approx_table.index_function(_vx_mant)
            table_index.set_attributes(tag="table_index", debug=debuglld)

            # argument reduction
            # Using AND -2 to exclude LSB set to 1 for Newton-Raphson convergence
            # TODO: detect if single operand inverse seed is supported by the targeted architecture
            pre_arg_red_index = TypeCast(BitLogicAnd(
                TypeCast(DivisionSeed(_vx_mant,
                                      precision=self.precision,
                                      tag="seed",
                                      debug=debug_lftolx,
                                      silent=True),
                         precision=ML_UInt64),
                Constant(-2, precision=ML_UInt64),
                precision=ML_UInt64),
                                         precision=self.precision,
                                         tag="pre_arg_red_index",
                                         debug=debug_lftolx)
            arg_red_index = Select(Equal(table_index, 0),
                                   1.0,
                                   pre_arg_red_index,
                                   tag="arg_red_index",
                                   debug=debug_lftolx)
            _red_vx = FMA(arg_red_index, _vx_mant, -1.0)
            _red_vx.set_attributes(tag="_red_vx", debug=debug_lftolx)
            inv_err = S2**-inv_approx_table.index_size
            red_interval = Interval(1 - inv_err, 1 + inv_err)

            # return in case of standard (non-special) input
            _log_inv_lo = TableLoad(log_table,
                                    table_index,
                                    1,
                                    tag="log_inv_lo",
                                    debug=debug_lftolx)
            _log_inv_hi = TableLoad(log_table,
                                    table_index,
                                    0,
                                    tag="log_inv_hi",
                                    debug=debug_lftolx)

            Log.report(Log.Verbose, "building mathematical polynomial")
            approx_interval = Interval(-inv_err, inv_err)
            poly_degree = sup(
                guessdegree(
                    log2(1 + sollya.x) / sollya.x, approx_interval, S2**
                    -(self.precision.get_field_size() * 1.1))) + 1
            sollya.settings.display = sollya.hexadecimal
            global_poly_object, approx_error = Polynomial.build_from_approximation_with_error(
                log2(1 + sollya.x) / sollya.x,
                poly_degree, [self.precision] * (poly_degree + 1),
                approx_interval,
                sollya.absolute,
                error_function=lambda p, f, ai, mod, t: sollya.dirtyinfnorm(
                    p - f, ai))
            Log.report(
                Log.Info, "poly_degree={}, approx_error={}".format(
                    poly_degree, approx_error))
            poly_object = global_poly_object.sub_poly(start_index=1, offset=1)
            #poly_object = global_poly_object.sub_poly(start_index=0,offset=0)

            Attributes.set_default_silent(True)
            Attributes.set_default_rounding_mode(ML_RoundToNearest)

            Log.report(Log.Verbose, "generating polynomial evaluation scheme")
            pre_poly = PolynomialSchemeEvaluator.generate_horner_scheme(
                poly_object, _red_vx, unified_precision=self.precision)
            _poly = FMA(pre_poly, _red_vx,
                        global_poly_object.get_cst_coeff(0, self.precision))
            _poly.set_attributes(tag="poly", debug=debug_lftolx)
            Log.report(
                Log.Verbose, "sollya global_poly_object: {}".format(
                    global_poly_object.get_sollya_object()))
            Log.report(
                Log.Verbose, "sollya poly_object: {}".format(
                    poly_object.get_sollya_object()))

            corr_exp = _vx_exp if exp_corr_factor is None else _vx_exp + exp_corr_factor

            Attributes.unset_default_rounding_mode()
            Attributes.unset_default_silent()

            pre_result = -_log_inv_hi + (_red_vx * _poly + (-_log_inv_lo))
            pre_result.set_attributes(tag="pre_result", debug=debug_lftolx)
            exact_log2_hi_exp = Conversion(corr_exp, precision=self.precision)
            exact_log2_hi_exp.set_attributes(tag="exact_log2_hi_hex",
                                             debug=debug_lftolx)
            _result = exact_log2_hi_exp + pre_result
            return _result, _poly, _log_inv_lo, _log_inv_hi, _red_vx

        result, poly, log_inv_lo, log_inv_hi, red_vx = compute_log(vx)
        result.set_attributes(tag="result", debug=debug_lftolx)

        # specific input value predicate
        neg_input = Comparison(vx,
                               0,
                               likely=False,
                               specifier=Comparison.Less,
                               debug=debugd,
                               tag="neg_input")
        vx_nan_or_inf = Test(vx,
                             specifier=Test.IsInfOrNaN,
                             likely=False,
                             debug=debugd,
                             tag="nan_or_inf")
        vx_snan = Test(vx,
                       specifier=Test.IsSignalingNaN,
                       likely=False,
                       debug=debugd,
                       tag="vx_snan")
        vx_inf = Test(vx,
                      specifier=Test.IsInfty,
                      likely=False,
                      debug=debugd,
                      tag="vx_inf")
        vx_subnormal = Test(vx,
                            specifier=Test.IsSubnormal,
                            likely=False,
                            debug=debugd,
                            tag="vx_subnormal")
        vx_zero = Test(vx,
                       specifier=Test.IsZero,
                       likely=False,
                       debug=debugd,
                       tag="vx_zero")

        exp_mone = Equal(vx_exp,
                         -1,
                         tag="exp_minus_one",
                         debug=debugd,
                         likely=False)
        vx_one = Equal(vx, 1.0, tag="vx_one", likely=False, debug=debugd)

        # Specific handling for the case exp == -1
        # log2(x) = log2(m) - 1
        #
        # as m in [1, 2[, log2(m) in [0, 1[
        # if r is close to 2, a catastrophic cancellation can occur
        #
        # r = seed(m)
        # log2(x) = log2(seed(m) * m / seed(m)) - 1
        #         = log2(seed(m) * m) - log2(seed(m)) - 1
        #
        # for m really close to 2 => seed(m) = 0.5
        #     => log2(x) = log2(0.5 * m)
        #                = log2(m) - 1
        result_exp_m1 = (-log_inv_hi - 1.0) + FMA(poly, red_vx, -log_inv_lo)
        result_exp_m1.set_attributes(tag="result_exp_m1", debug=debug_lftolx)

        m100 = -100
        S2100 = Constant(S2**100, precision=self.precision)
        result_subnormal, _, _, _, _ = compute_log(vx * S2100,
                                                   exp_corr_factor=m100)
        result_subnormal.set_attributes(tag="result_subnormal",
                                        debug=debug_lftolx)

        one_err = S2**-7
        approx_interval_one = Interval(-one_err, one_err)
        red_vx_one = vx - 1.0
        poly_degree_one = sup(
            guessdegree(
                log(1 + x) / x, approx_interval_one, S2**
                -(self.precision.get_field_size() + 1))) + 1
        poly_object_one = Polynomial.build_from_approximation(
            log(1 + sollya.x) / sollya.x, poly_degree_one,
            [self.precision] * (poly_degree_one + 1), approx_interval_one,
            absolute).sub_poly(start_index=1)
        poly_one = PolynomialSchemeEvaluator.generate_horner_scheme(
            poly_object_one, red_vx_one, unified_precision=self.precision)
        poly_one.set_attributes(tag="poly_one", debug=debug_lftolx)
        result_one = red_vx_one + red_vx_one * poly_one
        cond_one = (vx < (1 + one_err)) & (vx > (1 - one_err))
        cond_one.set_attributes(tag="cond_one", debug=debugd, likely=False)

        # main scheme
        pre_scheme = ConditionBlock(
            neg_input,
            Statement(ClearException(), Raise(ML_FPE_Invalid),
                      Return(FP_QNaN(self.precision))),
            ConditionBlock(
                vx_nan_or_inf,
                ConditionBlock(
                    vx_inf,
                    Statement(
                        ClearException(),
                        Return(FP_PlusInfty(self.precision)),
                    ),
                    Statement(ClearException(),
                              ConditionBlock(vx_snan, Raise(ML_FPE_Invalid)),
                              Return(FP_QNaN(self.precision)))),
                ConditionBlock(
                    vx_subnormal,
                    ConditionBlock(
                        vx_zero,
                        Statement(
                            ClearException(),
                            Raise(ML_FPE_DivideByZero),
                            Return(FP_MinusInfty(self.precision)),
                        ),
                        Statement(ClearException(), result_subnormal,
                                  Return(result_subnormal))),
                    ConditionBlock(
                        vx_one,
                        Statement(
                            ClearException(),
                            Return(FP_PlusZero(self.precision)),
                        ),
                        ConditionBlock(exp_mone, Return(result_exp_m1),
                                       Return(result))))))
        scheme = Statement(result, pre_scheme)
        return scheme
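A minimal pure-Python sketch of the reduction described in the comments above, with math.frexp in place of the mantissa/exponent extraction, a crude reciprocal in place of the DivisionSeed/table lookup, and math functions standing in for the tabulated -log2 values and the polynomial (illustrative only, not the generated code):

import math

TABLE_INDEX_SIZE = 7

def log2_reduced(x):
    """log2(x) = e + log2(r) with r in [1, 2), r then reduced around 1."""
    m, e = math.frexp(x)                  # x = m * 2**e, m in [0.5, 1)
    r, e = 2.0 * m, e - 1                 # renormalize so that r is in [1, 2)
    index = int((r - 1.0) * 2**TABLE_INDEX_SIZE)              # 7 MSBs of the mantissa
    inv_seed = 1.0 / (1.0 + index * 2.0**-TABLE_INDEX_SIZE)   # crude reciprocal seed
    red = inv_seed * r - 1.0              # small, since inv_seed * r is close to 1
    # log2(r) = log2(inv_seed * r) - log2(inv_seed); the first term is what the
    # polynomial approximates, the second is what log_table stores
    return e + math.log1p(red) / math.log(2.0) - math.log2(inv_seed)

for x in (0.3, 1.0, 5.7, 1024.0):
    assert abs(log2_reduced(x) - math.log2(x)) < 1e-12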
Example #18
 def numeric_emulate(self, input_value):
     """ Numeric emulation to generate expected value
     corresponding to input_value input """
     return log2(input_value)
Example #19
        def compute_log(_vx, exp_corr_factor=None):
            _vx_mant = MantissaExtraction(_vx,
                                          tag="_vx_mant",
                                          precision=self.precision,
                                          debug=debug_lftolx)
            _vx_exp = ExponentExtraction(_vx, tag="_vx_exp", debug=debugd)

            # The main table is indexed by the 7 most significant bits
            # of the mantissa
            table_index = inv_approx_table.index_function(_vx_mant)
            table_index.set_attributes(tag="table_index", debug=debuglld)

            # argument reduction
            # Using AND -2 to exclude LSB set to 1 for Newton-Raphson convergence
            # TODO: detect if single operand inverse seed is supported by the targeted architecture
            pre_arg_red_index = TypeCast(BitLogicAnd(
                TypeCast(DivisionSeed(_vx_mant,
                                      precision=self.precision,
                                      tag="seed",
                                      debug=debug_lftolx,
                                      silent=True),
                         precision=ML_UInt64),
                Constant(-2, precision=ML_UInt64),
                precision=ML_UInt64),
                                         precision=self.precision,
                                         tag="pre_arg_red_index",
                                         debug=debug_lftolx)
            arg_red_index = Select(Equal(table_index, 0),
                                   1.0,
                                   pre_arg_red_index,
                                   tag="arg_red_index",
                                   debug=debug_lftolx)
            _red_vx = FMA(arg_red_index, _vx_mant, -1.0)
            _red_vx.set_attributes(tag="_red_vx", debug=debug_lftolx)
            inv_err = S2**-inv_approx_table.index_size
            red_interval = Interval(1 - inv_err, 1 + inv_err)

            # return in case of standard (non-special) input
            _log_inv_lo = TableLoad(log_table,
                                    table_index,
                                    1,
                                    tag="log_inv_lo",
                                    debug=debug_lftolx)
            _log_inv_hi = TableLoad(log_table,
                                    table_index,
                                    0,
                                    tag="log_inv_hi",
                                    debug=debug_lftolx)

            Log.report(Log.Verbose, "building mathematical polynomial")
            approx_interval = Interval(-inv_err, inv_err)
            poly_degree = sup(
                guessdegree(
                    log2(1 + sollya.x) / sollya.x, approx_interval, S2**
                    -(self.precision.get_field_size() * 1.1))) + 1
            sollya.settings.display = sollya.hexadecimal
            global_poly_object, approx_error = Polynomial.build_from_approximation_with_error(
                log2(1 + sollya.x) / sollya.x,
                poly_degree, [self.precision] * (poly_degree + 1),
                approx_interval,
                sollya.absolute,
                error_function=lambda p, f, ai, mod, t: sollya.dirtyinfnorm(
                    p - f, ai))
            Log.report(
                Log.Info, "poly_degree={}, approx_error={}".format(
                    poly_degree, approx_error))
            poly_object = global_poly_object.sub_poly(start_index=1, offset=1)
            #poly_object = global_poly_object.sub_poly(start_index=0,offset=0)

            Attributes.set_default_silent(True)
            Attributes.set_default_rounding_mode(ML_RoundToNearest)

            Log.report(Log.Verbose, "generating polynomial evaluation scheme")
            pre_poly = PolynomialSchemeEvaluator.generate_horner_scheme(
                poly_object, _red_vx, unified_precision=self.precision)
            _poly = FMA(pre_poly, _red_vx,
                        global_poly_object.get_cst_coeff(0, self.precision))
            _poly.set_attributes(tag="poly", debug=debug_lftolx)
            Log.report(
                Log.Verbose, "sollya global_poly_object: {}".format(
                    global_poly_object.get_sollya_object()))
            Log.report(
                Log.Verbose, "sollya poly_object: {}".format(
                    poly_object.get_sollya_object()))

            corr_exp = _vx_exp if exp_corr_factor is None else _vx_exp + exp_corr_factor

            Attributes.unset_default_rounding_mode()
            Attributes.unset_default_silent()

            pre_result = -_log_inv_hi + (_red_vx * _poly + (-_log_inv_lo))
            pre_result.set_attributes(tag="pre_result", debug=debug_lftolx)
            exact_log2_hi_exp = Conversion(corr_exp, precision=self.precision)
            exact_log2_hi_exp.set_attributes(tag="exact_log2_hi_hex",
                                             debug=debug_lftolx)
            _result = exact_log2_hi_exp + pre_result
            return _result, _poly, _log_inv_lo, _log_inv_hi, _red_vx
Example #20
  def generate_scheme(self):
    # declaring target and instantiating optimization engine

    vx = self.implementation.add_input_variable("x", self.precision)
    
    Log.set_dump_stdout(True)
    
    Log.report(Log.Info, "\033[33;1m generating implementation scheme \033[0m")
    if self.debug_flag: 
        Log.report(Log.Info, "\033[31;1m debug has been enabled \033[0;m")
    
    # local overloading of RaiseReturn operation
    def ExpRaiseReturn(*args, **kwords):
        kwords["arg_value"] = vx
        kwords["function_name"] = self.function_name
        return RaiseReturn(*args, **kwords)
    
    C_m1 = Constant(-1, precision = self.precision)
    
    test_NaN_or_inf = Test(vx, specifier = Test.IsInfOrNaN, likely = False, debug = debug_multi, tag = "NaN_or_inf", precision = ML_Bool)
    test_NaN = Test(vx, specifier = Test.IsNaN, likely = False, debug = debug_multi, tag = "is_NaN", precision = ML_Bool)
    test_inf = Comparison(vx, 0, specifier = Comparison.Greater, debug = debug_multi, tag = "sign", precision = ML_Bool, likely = False)
    
    #  Infinity input
    infty_return = Statement(ConditionBlock(test_inf, Return(FP_PlusInfty(self.precision)), Return(C_m1)))
    #  non-std input (inf/nan)
    specific_return = ConditionBlock(test_NaN, Return(FP_QNaN(self.precision)), infty_return)
    
    # Over/Underflow Tests
    
    precision_emax = self.precision.get_emax()
    precision_max_value = S2**(precision_emax + 1)
    expm1_overflow_bound = ceil(log(precision_max_value + 1))
    overflow_test = Comparison(vx, expm1_overflow_bound, likely = False, specifier = Comparison.Greater, precision = ML_Bool)
    overflow_return = Statement(Return(FP_PlusInfty(self.precision)))
    
    precision_emin = self.precision.get_emin_subnormal()
    precision_min_value = S2** precision_emin
    expm1_underflow_bound = floor(log(precision_min_value) + 1)
    underflow_test = Comparison(vx, expm1_underflow_bound, likely = False, specifier = Comparison.Less, precision = ML_Bool)
    underflow_return = Statement(Return(C_m1))
    
    sollya_precision = {ML_Binary32: sollya.binary32, ML_Binary64: sollya.binary64}[self.precision]
    int_precision = {ML_Binary32: ML_Int32, ML_Binary64: ML_Int64}[self.precision]
    
    # Constants
    
    log_2 = round(log(2), sollya_precision, sollya.RN)
    invlog2 = round(1/log(2), sollya_precision, sollya.RN)
    log_2_cst = Constant(log_2, precision = self.precision)
    
    interval_vx = Interval(expm1_underflow_bound, expm1_overflow_bound)
    interval_fk = interval_vx * invlog2
    interval_k = Interval(floor(inf(interval_fk)), ceil(sup(interval_fk)))
    
    log2_hi_precision = self.precision.get_field_size() - 6
    log2_hi = round(log(2), log2_hi_precision, sollya.RN)
    log2_lo = round(log(2) - log2_hi, sollya_precision, sollya.RN)


    # Reduction
    unround_k = vx * invlog2
    ik = NearestInteger(unround_k, precision = int_precision, debug = debug_multi, tag = "ik")
    k = Conversion(ik, precision = self.precision, tag = "k")
    
    red_coeff1 = Multiplication(k, log2_hi, precision = self.precision)
    red_coeff2 = Multiplication(Negation(k, precision = self.precision), log2_lo, precision = self.precision)
    
    pre_sub_mul = Subtraction(vx, red_coeff1, precision  = self.precision)
    
    s = Addition(pre_sub_mul, red_coeff2, precision = self.precision)
    z = Subtraction(s, pre_sub_mul, precision = self.precision)
    t = Subtraction(red_coeff2, z, precision = self.precision)
    
    r = Addition(s, t, precision = self.precision)
    
    r.set_attributes(tag = "r", debug = debug_multi)
    
    r_interval = Interval(-log_2/S2, log_2/S2)
    
    local_ulp = sup(ulp(exp(r_interval), self.precision))
    
    print("ulp: ", local_ulp)
    error_goal = S2**-1*local_ulp
    print("error goal: ", error_goal)
    
    
    # Polynomial Approx
    error_function = lambda p, f, ai, mod, t: dirtyinfnorm(f - p, ai)
    Log.report(Log.Info, "\033[33;1m Building polynomial \033[0m\n")
    
    poly_degree = sup(guessdegree(expm1(sollya.x), r_interval, error_goal) + 1)
    
    polynomial_scheme_builder = PolynomialSchemeEvaluator.generate_horner_scheme
    poly_degree_list = range(0, poly_degree)
    
    precision_list = [self.precision] *(len(poly_degree_list) + 1)
    poly_object, poly_error = Polynomial.build_from_approximation_with_error(expm1(sollya.x), poly_degree, precision_list, r_interval, sollya.absolute, error_function = error_function)
    sub_poly = poly_object.sub_poly(start_index = 2)
    Log.report(Log.Info, "Poly : %s" % sub_poly)
    Log.report(Log.Info, "poly error : {} / {:d}".format(poly_error, int(sollya.log2(poly_error))))
    pre_sub_poly = polynomial_scheme_builder(sub_poly, r, unified_precision = self.precision)
    poly = r + pre_sub_poly
    poly.set_attributes(tag = "poly", debug = debug_multi)
    
    exp_k = ExponentInsertion(ik, tag = "exp_k", debug = debug_multi, precision = self.precision)
    exp_mk = ExponentInsertion(-ik, tag = "exp_mk", debug = debug_multi, precision = self.precision)
    
    diff = 1 - exp_mk
    diff.set_attributes(tag = "diff", debug = debug_multi) 
    
    # Late Tests
    late_overflow_test = Comparison(ik, self.precision.get_emax(), specifier = Comparison.Greater, likely = False, debug = debug_multi, tag = "late_overflow_test")
    
    overflow_exp_offset = (self.precision.get_emax() - self.precision.get_field_size() / 2)
    diff_k = ik - overflow_exp_offset 
    
    exp_diff_k = ExponentInsertion(diff_k, precision = self.precision, tag = "exp_diff_k", debug = debug_multi)
    exp_oflow_offset = ExponentInsertion(overflow_exp_offset, precision = self.precision, tag = "exp_offset", debug = debug_multi)
    
    late_overflow_result = (exp_diff_k * (1 + poly)) * exp_oflow_offset - 1.0
    
    late_overflow_return = ConditionBlock(
        Test(late_overflow_result, specifier = Test.IsInfty, likely = False), 
        ExpRaiseReturn(ML_FPE_Overflow, return_value = FP_PlusInfty(self.precision)), 
        Return(late_overflow_result)
        )


    late_underflow_test = Comparison(k, self.precision.get_emin_normal(), specifier = Comparison.LessOrEqual, likely = False)
    
    underflow_exp_offset = 2 * self.precision.get_field_size()
    corrected_coeff = ik + underflow_exp_offset
    
    exp_corrected = ExponentInsertion(corrected_coeff, precision = self.precision)
    exp_uflow_offset = ExponentInsertion(-underflow_exp_offset, precision = self.precision)
    
    late_underflow_result = ( exp_corrected * (1 + poly)) * exp_uflow_offset - 1.0
    
    test_subnormal = Test(late_underflow_result, specifier = Test.IsSubnormal, likely = False)
    
    late_underflow_return = Statement(
        ConditionBlock(
            test_subnormal, 
            ExpRaiseReturn(ML_FPE_Underflow, return_value = late_underflow_result)), 
            Return(late_underflow_result)
            )
    
    # Reconstruction
    
    std_result = exp_k * ( poly + diff )
    std_result.set_attributes(tag = "result", debug = debug_multi)
    
    result_scheme = ConditionBlock(
        late_overflow_test, 
        late_overflow_return, 
        ConditionBlock(
            late_underflow_test, 
            late_underflow_return, 
            Return(std_result)
            )
        )
        
    std_return = ConditionBlock(
        overflow_test, 
        overflow_return, 
        ConditionBlock(
            underflow_test, 
            underflow_return, 
            result_scheme)
        )
        
    scheme = ConditionBlock(
        test_NaN_or_inf, 
        Statement(specific_return), 
        std_return
        )

    return scheme
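The s/z/t sequence above is a compensated evaluation of x - k*log(2) using a two-word log(2), so that k * log2_hi is exact; a float-only sketch of the same reduction (constants chosen here for illustration, not the ones the generator computes):

import math

LOG2_HI = float.fromhex("0x1.62e42fefa38p-1")   # log(2) truncated to ~44 bits
LOG2_LO = math.log(2.0) - LOG2_HI               # residual part of log(2)

def expm1_reduce(x):
    """Return (k, r) with x ~= k*log(2) + r and |r| <= log(2)/2."""
    k = round(x / math.log(2.0))
    r = (x - k * LOG2_HI) - k * LOG2_LO         # k * LOG2_HI is exact for moderate k
    return k, r

k, r = expm1_reduce(10.0)
print(k, r, abs(10.0 - (k * math.log(2.0) + r)) < 1e-15)   # 14 0.295... True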
Example #21
    def generate_scalar_scheme(self, vx):
        # approximation of the gamma function
        abs_vx = Abs(vx, precision=self.precision)

        FCT_LIMIT = 1.0

        omega_value = self.precision.get_omega()

        def sollya_wrap_bigfloat_fct(bfct):
            """ wrap bigfloat's function <bfct> such that is can be used
                on SollyaObject inputs and returns SollyaObject results """
            def fct(x):
                return sollya.SollyaObject(bfct(SollyaObject(x).bigfloat()))

            return fct

        sollya_gamma = sollya_wrap_bigfloat_fct(bigfloat.gamma)
        sollya_digamma = sollya_wrap_bigfloat_fct(bigfloat.digamma)
        # first derivative of gamma is digamma * gamma
        bigfloat_gamma_d0 = lambda x: bigfloat.gamma(x) * bigfloat.digamma(x)
        sollya_gamma_d0 = sollya_wrap_bigfloat_fct(bigfloat_gamma_d0)

        # approximating trigamma with a straightforward finite-difference of digamma
        U = 2**-64
        bigfloat_trigamma = lambda x: (
            (bigfloat.digamma(x * (1 + U)) - bigfloat.digamma(x)) / (x * U))
        sollya_trigamma = sollya_wrap_bigfloat_fct(bigfloat_trigamma)

        bigfloat_gamma_d1 = lambda x: (bigfloat_trigamma(x) * bigfloat.gamma(
            x) + bigfloat_gamma_d0(x) * bigfloat.digamma(x))
        sollya_gamma_d1 = sollya_wrap_bigfloat_fct(bigfloat_gamma_d1)

        def sollya_gamma_fct(x, diff_order, prec):
            """ wrapper to use bigfloat implementation of exponential
                rather than sollya's implementation directly.
                This wrapper implements sollya's function API.

                :param x: numerical input value (may be an Interval)
                :param diff_order: differential order
                :param prec: numerical precision expected (min)
            """
            fct = None
            if diff_order == 0:
                fct = sollya_gamma
            elif diff_order == 1:
                fct = sollya_gamma_d0
            elif diff_order == 2:
                fct = sollya_gamma_d1
            else:
                raise NotImplementedError
            with bigfloat.precision(prec):
                if x.is_range():
                    lo = sollya.inf(x)
                    hi = sollya.sup(x)
                    return sollya.Interval(fct(lo), fct(hi))
                else:
                    return fct(x)

        # search for the lowest x such that gamma(x) >= omega
        omega_upper_limit = search_bound_threshold(sollya_gamma, omega_value,
                                                   2, 1000.0, self.precision)
        Log.report(Log.Debug, "gamma(x) = {} limit is {}", omega_value,
                   omega_upper_limit)

        # evaluate gamma(<min-normal-value>)
        lower_x_bound = self.precision.get_min_normal_value()
        value_min = sollya_gamma(lower_x_bound)
        Log.report(Log.Debug, "gamma({}) = {}(log2={})", lower_x_bound,
                   value_min, int(sollya.log2(value_min)))

        # evaluate gamma(<min-subnormal-value>)
        lower_x_bound = self.precision.get_min_subnormal_value()
        value_min = sollya_gamma(lower_x_bound)
        Log.report(Log.Debug, "gamma({}) = {}(log2={})", lower_x_bound,
                   value_min, int(sollya.log2(value_min)))

        # Gamma is defined such that gamma(x+1) = x * gamma(x)
        #
        # we approximate gamma over [1, 2]
        # y in [1, 2]
        # gamma(y) = (y-1) * gamma(y-1)
        # gamma(y-1) = gamma(y) / (y-1)
        Log.report(Log.Info, "building mathematical polynomial")
        approx_interval = Interval(1, 2)
        approx_fct = sollya.function(sollya_gamma_fct)
        poly_degree = int(
            sup(
                guessdegree(approx_fct, approx_interval, S2**
                            -(self.precision.get_field_size() + 5)))) + 1
        Log.report(Log.Debug, "approximation's poly degree over [1, 2] is {}",
                   poly_degree)

        sys.exit(1)

        poly_degree_list = list(range(1, poly_degree, 2))
        Log.report(Log.Debug, "poly_degree is {} and list {}", poly_degree,
                   poly_degree_list)
        global_poly_object = Polynomial.build_from_approximation(
            approx_fct, poly_degree_list,
            [self.precision] * len(poly_degree_list), approx_interval,
            sollya.relative)
        Log.report(
            Log.Debug, "inform is {}",
            dirtyinfnorm(approx_fct - global_poly_object.get_sollya_object(),
                         approx_interval))
        poly_object = global_poly_object.sub_poly(start_index=1, offset=1)

        ext_precision = {
            ML_Binary32: ML_SingleSingle,
            ML_Binary64: ML_DoubleDouble,
        }[self.precision]

        pre_poly = PolynomialSchemeEvaluator.generate_horner_scheme(
            poly_object, abs_vx, unified_precision=self.precision)

        result = FMA(pre_poly, abs_vx, abs_vx)
        result.set_attributes(tag="result", debug=debug_multi)

        eps_target = S2**-(self.precision.get_field_size() + 5)

        def offset_div_function(fct):
            return lambda offset: fct(sollya.x + offset)

        # empirical numbers
        field_size = {ML_Binary32: 6, ML_Binary64: 8}[self.precision]

        near_indexing = SubFPIndexing(eps_exp, 0, 6, self.precision)
        near_approx = generic_poly_split(offset_div_function(sollya.erf),
                                         near_indexing, eps_target,
                                         self.precision, abs_vx)
        near_approx.set_attributes(tag="near_approx", debug=debug_multi)

        def offset_function(fct):
            return lambda offset: fct(sollya.x + offset)

        medium_indexing = SubFPIndexing(1, one_limit_exp, 7, self.precision)

        medium_approx = generic_poly_split(offset_function(sollya.erf),
                                           medium_indexing, eps_target,
                                           self.precision, abs_vx)
        medium_approx.set_attributes(tag="medium_approx", debug=debug_multi)

        # approximation for positive values
        scheme = ConditionBlock(
            abs_vx < eps, Return(result),
            ConditionBlock(
                abs_vx < near_indexing.get_max_bound(), Return(near_approx),
                ConditionBlock(abs_vx < medium_indexing.get_max_bound(),
                               Return(medium_approx),
                               Return(Constant(1.0,
                                               precision=self.precision)))))
        return scheme
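The recurrence quoted in the comments (gamma(x+1) = x * gamma(x)) is what lets an approximation built on [1, 2] be reused elsewhere; a quick numerical check with math.gamma (illustration only, unrelated to the generated scheme):

import math

x = 0.3
# gamma(x) = gamma(x + 1) / x, with x + 1 falling inside the approximated interval [1, 2]
print(math.gamma(x), math.gamma(x + 1.0) / x)   # both ~ 2.99157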
Example #22
def generate_payne_hanek(vx,
                         frac_pi,
                         precision,
                         n=100,
                         k=4,
                         chunk_num=None,
                         debug=False):
    """ generate payne and hanek argument reduction for frac_pi * variable """

    sollya.roundingwarnings = sollya.off
    debug_precision = debug_multi
    int_precision = {ML_Binary32: ML_Int32, ML_Binary64: ML_Int64}[precision]

    p = precision.get_field_size()

    # weight of the most significant digit of the constant
    cst_msb = floor(log2(abs(frac_pi)))
    # length of exponent range which must be covered by the approximation
    # of the constant
    cst_exp_range = cst_msb - precision.get_emin_subnormal() + 1

    # the chunk size has to be chosen so that multiplication by a split <v>
    # (vx_hi or vx_lo) is exact
    chunk_size = precision.get_field_size() / 2 - 2
    chunk_number = int(ceil((cst_exp_range + chunk_size - 1) / chunk_size))
    scaling_factor = S2**-(chunk_size / 2)

    chunk_size_cst = Constant(chunk_size, precision=ML_Int32)
    cst_msb_node = Constant(cst_msb, precision=ML_Int32)

    # Saving sollya's global precision
    old_global_prec = sollya.settings.prec
    sollya.settings.prec(cst_exp_range + n)

    # table to store chunk of constant multiplicand
    cst_table = ML_NewTable(dimensions=[chunk_number, 1],
                            storage_precision=precision,
                            tag="PH_cst_table")
    # table to store sqrt(scaling_factor) corresponding to the
    # cst multiplicand chunks
    scale_table = ML_NewTable(dimensions=[chunk_number, 1],
                              storage_precision=precision,
                              tag="PH_scale_table")
    tmp_cst = frac_pi

    # cst_table stores normalized constant chunks (they have been
    # scaled back to an interval close to 1.0)
    #
    # scale_table stores the scaling factors corresponding to the
    # denormalization of cst_table coefficients

    # this loop divides the digits of frac_pi into chunks;
    # the chunk lsb weight is given by a shift from
    # cst_msb, a multiple of the chunk index
    for i in range(chunk_number):
        value_div_factor = S2**(chunk_size * (i + 1) - cst_msb)
        local_cst = int(tmp_cst * value_div_factor) / value_div_factor
        local_scale = (scaling_factor**i)
        # storing scaled constant chunks
        cst_table[i][0] = local_cst / (local_scale**2)
        scale_table[i][0] = local_scale
        # Updating constant value
        tmp_cst = tmp_cst - local_cst

    # Computing which part of the constant we do not need to multiply
    # In the following comments, vi represents the bit of frac_pi of weight 2**-i

    # Bits vi so that i <= (vx_exp - p + 1 -k)  are not needed, because they result
    # in a multiple of 2pi and do not contribute to trig functions.

    vx_exp = ExponentExtraction(
        vx, precision=vx.get_precision().get_integer_format())
    vx_exp = Conversion(vx_exp, precision=ML_Int32)

    msb_exp = -(vx_exp - p + 1 - k)
    msb_exp.set_attributes(tag="msb_exp", debug=debug_multi)
    msb_exp = Conversion(msb_exp, precision=ML_Int32)

    # Select the highest index where the reduction should start
    msb_index = Select(cst_msb_node < msb_exp, 0,
                       (cst_msb_node - msb_exp) / chunk_size_cst)
    msb_index.set_attributes(tag="msb_index", debug=debug_multi)

    # For a desired accuracy of 2**-n, bits vi so that i >= (vx_exp + n + 4)  are not needed, because they contribute less than
    # 2**-n to the result

    lsb_exp = -(vx_exp + n + 4)
    lsb_exp.set_attributes(tag="lsb_exp", debug=debug_multi)
    lsb_exp = Conversion(lsb_exp, precision=ML_Int32)

    # Index of the corresponding chunk
    lsb_index = (cst_msb_node - lsb_exp) / chunk_size_cst
    lsb_index.set_attributes(tag="lsb_index", debug=debug_multi)

    # Splitting vx
    half_size = precision.get_field_size() / 2 + 1

    # hi part (most significant digit) of vx input
    vx_hi = TypeCast(BitLogicAnd(
        TypeCast(vx, precision=int_precision),
        Constant(~int(2**half_size - 1), precision=int_precision)),
                     precision=precision)
    vx_hi.set_attributes(tag="vx_hi_ph")  #, debug = debug_multi)

    vx_lo = vx - vx_hi
    vx_lo.set_attributes(tag="vx_lo_ph")  #, debug = debug_multi)

    # loop iterator variable
    vi = Variable("i", precision=ML_Int32, var_type=Variable.Local)
    # step scaling factor
    half_scaling = Constant(S2**(-chunk_size / 2), precision=precision)

    i1 = Constant(1, precision=ML_Int32)

    # accumulator to the output precision
    acc = Variable("acc", precision=precision, var_type=Variable.Local)
    # integer accumulator
    acc_int = Variable("acc_int",
                       precision=int_precision,
                       var_type=Variable.Local)

    init_loop = Statement(
        vx_hi,
        vx_lo,
        ReferenceAssign(vi, msb_index),
        ReferenceAssign(acc, Constant(0, precision=precision)),
        ReferenceAssign(acc_int, Constant(0, precision=int_precision)),
    )

    cst_load = TableLoad(cst_table,
                         vi,
                         0,
                         tag="cst_load",
                         debug=debug_precision)
    sca_load = TableLoad(scale_table,
                         vi,
                         0,
                         tag="sca_load",
                         debug=debug_precision)
    # loop body
    # hi_mult = vx_hi * <scale_factor> * <cst>
    hi_mult = (vx_hi * sca_load) * (cst_load * sca_load)
    hi_mult.set_attributes(tag="hi_mult", debug=debug_precision)
    pre_hi_mult_int = NearestInteger(hi_mult,
                                     precision=int_precision,
                                     tag="hi_mult_int",
                                     debug=(debuglld if debug else None))
    hi_mult_int_f = Conversion(pre_hi_mult_int,
                               precision=precision,
                               tag="hi_mult_int_f",
                               debug=debug_precision)
    pre_hi_mult_red = (hi_mult - hi_mult_int_f).modify_attributes(
        tag="hi_mult_red", debug=debug_precision)

    # for the first chunks, (vx_hi * <constant chunk>) exceeds 2**k+1 and may be
    # discarded (it would otherwise lead to overflow during integer conversion)
    pre_exclude_hi = ((cst_msb_node - (vi + i1) * chunk_size + i1) +
                      (vx_exp + Constant(-half_size + 1, precision=ML_Int32))
                      ).modify_attributes(tag="pre_exclude_hi",
                                          debug=(debugd if debug else None))
    pre_exclude_hi.propagate_precision(ML_Int32,
                                       [cst_msb_node, vi, vx_exp, i1])
    Ck = Constant(k, precision=ML_Int32)
    exclude_hi = pre_exclude_hi <= Ck
    exclude_hi.set_attributes(tag="exclude_hi", debug=debug_multi)

    hi_mult_red = Select(exclude_hi, pre_hi_mult_red,
                         Constant(0, precision=precision))
    hi_mult_int = Select(exclude_hi, pre_hi_mult_int,
                         Constant(0, precision=int_precision))

    # lo part of the chunk reduction
    lo_mult = (vx_lo * sca_load) * (cst_load * sca_load)
    lo_mult.set_attributes(tag="lo_mult")  #, debug = debug_multi)
    lo_mult_int = NearestInteger(lo_mult,
                                 precision=int_precision,
                                 tag="lo_mult_int")  #, debug = debug_multi
    lo_mult_int_f = Conversion(lo_mult_int,
                               precision=precision,
                               tag="lo_mult_int_f")  #, debug = debug_multi)
    lo_mult_red = (lo_mult - lo_mult_int_f).modify_attributes(
        tag="lo_mult_red")  #, debug = debug_multi)

    # accumulating fractional part
    acc_expr = (acc + hi_mult_red) + lo_mult_red
    # accumulating integer part
    int_expr = ((acc_int + hi_mult_int) + lo_mult_int) % 2**(k + 1)

    CF1 = Constant(1, precision=precision)
    CI1 = Constant(1, precision=int_precision)

    # extracting the exceeding integer part from the fractional accumulator
    acc_expr_int = NearestInteger(acc_expr, precision=int_precision)
    # normalizing the integer and fractional accumulators by subtracting, then
    # adding, the exceeding integer part
    normalization = Statement(
        ReferenceAssign(
            acc, acc_expr - Conversion(acc_expr_int, precision=precision)),
        ReferenceAssign(acc_int, int_expr + acc_expr_int),
    )

    acc_expr.set_attributes(tag="acc_expr")  #, debug = debug_multi)
    int_expr.set_attributes(tag="int_expr")  #, debug = debug_multi)

    red_loop = Loop(
        init_loop, vi <= lsb_index,
        Statement(acc_expr, int_expr, normalization,
                  ReferenceAssign(vi, vi + 1)))

    result = Statement(lsb_index, msb_index, red_loop)

    # restoring sollya's global precision
    sollya.settings.prec = old_global_prec

    return result, acc, acc_int
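
# --- Illustrative sketch (standalone, not part of the generator above) ---
# Payne-Hanek reduction computes x mod (pi/2^k) for huge x by multiplying x with
# an extended-precision approximation of 2^k/pi and keeping only the relevant
# bits.  Below is a toy rational-arithmetic version of the same idea, using only
# the standard library; every name here is hypothetical.
from fractions import Fraction
import math

PI_RATIONAL = Fraction("3.14159265358979323846264338327950288")  # pi, ~35 digits

def payne_hanek_sketch(x, k=6):
    """Return (q, r) such that x ~= q * pi/2**k + r with |r| <= pi/2**(k+1)."""
    t = Fraction(x) * 2**k / PI_RATIONAL        # x / (pi / 2^k), exact in rationals
    q = round(t)                                # nearest multiple of pi/2^k
    r = float((t - q) * PI_RATIONAL / 2**k)     # remainder folded back to radians
    return q, r

q, r = payne_hanek_sketch(1.0e8)
assert abs(q * (math.pi / 2**6) + r - 1.0e8) < 1e-4   # sanity check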
Example #23
0
    def generate_scalar_scheme(self, vx):
        Log.set_dump_stdout(True)

        Log.report(Log.Info, "\033[33;1m generating implementation scheme \033[0m")
        if self.debug_flag:
                Log.report(Log.Info, "\033[31;1m debug has been enabled \033[0;m")

        index_size = 5

        comp_lo = (vx < 0)
        comp_lo.set_attributes(tag = "comp_lo", precision = ML_Bool)
        sign = Select(comp_lo, -1, 1, precision = self.precision)

        # as sinh is an odd function, we can simplify the input to its absolute
        # value once the sign has been extracted
        vx = Abs(vx)
        int_precision = self.precision.get_integer_format()

        # argument reduction
        arg_reg_value = log(2)/2**index_size
        inv_log2_value = round(1/arg_reg_value, self.precision.get_sollya_object(), sollya.RN)
        inv_log2_cst = Constant(inv_log2_value, precision = self.precision, tag = "inv_log2")

        # for r_hi to be accurate we ensure k * log2_hi_value_cst is exact
        # by limiting the number of non-zero bits in log2_hi_value_cst
        # cosh(x) ~ exp(abs(x))/2    for a big enough x
        # cosh(x) > 2^1023 <=> exp(x) > 2^1024 <=> x > log(2^1024)
        # k = inv_log2_value * x
        # -1 for guard
        max_k_approx    = inv_log2_value * log(sollya.SollyaObject(2)**1024)
        max_k_bitsize = int(ceil(log2(max_k_approx)))
        Log.report(Log.Info, "max_k_bitsize: %d" % max_k_bitsize)
        log2_hi_value_precision = self.precision.get_precision() - max_k_bitsize - 1

        log2_hi_value = round(arg_reg_value, log2_hi_value_precision, sollya.RN)
        log2_lo_value = round(arg_reg_value - log2_hi_value, self.precision.get_sollya_object(), sollya.RN)
        log2_hi_value_cst = Constant(log2_hi_value, tag = "log2_hi_value", precision = self.precision)
        log2_lo_value_cst = Constant(log2_lo_value, tag = "log2_lo_value", precision = self.precision)

        k = Trunc(Multiplication(inv_log2_cst, vx), precision = self.precision)
        k_log2 = Multiplication(k, log2_hi_value_cst, precision = self.precision, exact = True, tag = "k_log2", unbreakable = True)
        r_hi = vx - k_log2
        r_hi.set_attributes(tag = "r_hi", debug = debug_multi, unbreakable = True)
        r_lo = -k * log2_lo_value_cst
        # reduced argument
        r = r_hi + r_lo
        r.set_attributes(tag = "r", debug = debug_multi)

        if is_gappa_installed():
                r_eval_error = self.get_eval_error(r_hi, variable_copy_map =
                    {
                        vx: Variable("vx", interval = Interval(0, 715), precision = self.precision),
                        k: Variable("k", interval = Interval(0, 1024), precision = self.precision)
                    })
                Log.report(Log.Verbose, "r_eval_error: ", r_eval_error)

        approx_interval = Interval(-arg_reg_value, arg_reg_value)
        error_goal_approx = 2**-(self.precision.get_precision())

        poly_degree = sup(guessdegree(exp(sollya.x), approx_interval, error_goal_approx)) + 3
        precision_list = [1] + [self.precision] * (poly_degree)

        k_integer = Conversion(k, precision = int_precision, tag = "k_integer", debug = debug_multi)
        k_hi = BitLogicRightShift(k_integer, Constant(index_size, precision=int_precision), tag = "k_int_hi", precision = int_precision, debug = debug_multi)
        k_lo = Modulo(k_integer, 2**index_size, tag = "k_int_lo", precision = int_precision, debug = debug_multi)
        pow_exp = ExponentInsertion(Conversion(k_hi, precision = int_precision), precision = self.precision, tag = "pow_exp", debug = debug_multi)

        exp_table = ML_NewTable(dimensions = [2 * 2**index_size, 4], storage_precision = self.precision, tag = self.uniquify_name("exp2_table"))
        for i in range(2 * 2**index_size):
            input_value = i - 2**index_size if i >= 2**index_size else i

            reduced_hi_prec = int(self.precision.get_mantissa_size() - 8)
            # using SollyaObject wrapper to force evaluation by sollya
            # with higher precision
            exp_value    = sollya.SollyaObject(2)**((input_value)* 2**-index_size)
            mexp_value = sollya.SollyaObject(2)**((-input_value)* 2**-index_size)
            pos_value_hi = round(exp_value, reduced_hi_prec, sollya.RN)
            pos_value_lo = round(exp_value - pos_value_hi, self.precision.get_sollya_object(), sollya.RN)
            neg_value_hi = round(mexp_value, reduced_hi_prec, sollya.RN)
            neg_value_lo = round(mexp_value - neg_value_hi, self.precision.get_sollya_object(), sollya.RN)
            exp_table[i][0] = neg_value_hi
            exp_table[i][1] = neg_value_lo
            exp_table[i][2] = pos_value_hi
            exp_table[i][3] = pos_value_lo

        # log2_value = log(2) / 2^index_size
        # sinh(x) = 1/2 * (exp(x) - exp(-x))
        # exp(x) = exp(x - k * log2_value + k * log2_value)
        #
        # r = x - k * log2_value
        # exp(x) = exp(r) * 2 ^ (k / 2^index_size)
        #
        # k / 2^index_size = h + l * 2^-index_size, with k, h, l integers
        # exp(x) = exp(r) * 2^h * 2^(l *2^-index_size)
        #
        # sinh(x) = exp(r) * 2^(h-1) * 2^(l *2^-index_size) - exp(-r) * 2^(-h-1) * 2^(-l *2^-index_size)
        # S=2^(h-1), T = 2^(-h-1)
        # exp(r)    = 1 + poly_pos(r)
        # exp(-r) = 1 + poly_neg(r)
        # 2^(l / 2^index_size)    = pos_value_hi + pos_value_lo
        # 2^(-l / 2^index_size) = neg_value_hi + neg_value_lo
        #

        error_function = lambda p, f, ai, mod, t: dirtyinfnorm(f - p, ai)

        poly_object, poly_approx_error = Polynomial.build_from_approximation_with_error(exp(sollya.x), poly_degree, precision_list, approx_interval, sollya.absolute, error_function = error_function)

        Log.report(Log.Verbose, "poly_approx_error: {}, {}".format(poly_approx_error, float(log2(poly_approx_error))))

        polynomial_scheme_builder = PolynomialSchemeEvaluator.generate_horner_scheme
        poly_pos = polynomial_scheme_builder(poly_object.sub_poly(start_index = 1), r, unified_precision = self.precision)
        poly_pos.set_attributes(tag = "poly_pos", debug = debug_multi)

        poly_neg = polynomial_scheme_builder(poly_object.sub_poly(start_index = 1), -r, unified_precision = self.precision)
        poly_neg.set_attributes(tag = "poly_neg", debug = debug_multi)

        table_index = Addition(k_lo, Constant(2**index_size, precision = int_precision), precision = int_precision, tag = "table_index", debug = debug_multi)

        neg_value_load_hi = TableLoad(exp_table, table_index, 0, tag = "neg_value_load_hi", debug = debug_multi)
        neg_value_load_lo = TableLoad(exp_table, table_index, 1, tag = "neg_value_load_lo", debug = debug_multi)
        pos_value_load_hi = TableLoad(exp_table, table_index, 2, tag = "pos_value_load_hi", debug = debug_multi)
        pos_value_load_lo = TableLoad(exp_table, table_index, 3, tag = "pos_value_load_lo", debug = debug_multi)

        k_plus = Max(
            Subtraction(k_hi, Constant(1, precision = int_precision), precision=int_precision, tag="k_plus", debug=debug_multi),
            Constant(self.precision.get_emin_normal(), precision = int_precision))
        k_neg = Max(
            Subtraction(-k_hi, Constant(1, precision=int_precision), precision=int_precision, tag="k_neg", debug=debug_multi),
            Constant(self.precision.get_emin_normal(), precision = int_precision))

        # 2^(h-1)
        pow_exp_pos = ExponentInsertion(k_plus, precision = self.precision, tag="pow_exp_pos", debug=debug_multi)
        # 2^(-h-1)
        pow_exp_neg = ExponentInsertion(k_neg, precision = self.precision, tag="pow_exp_neg", debug=debug_multi)

        hi_terms = (pos_value_load_hi * pow_exp_pos - neg_value_load_hi * pow_exp_neg)
        hi_terms.set_attributes(tag = "hi_terms", debug=debug_multi)


        pos_exp = (pos_value_load_hi * poly_pos + (pos_value_load_lo + pos_value_load_lo * poly_pos)) * pow_exp_pos
        pos_exp.set_attributes(tag = "pos_exp", debug = debug_multi)

        neg_exp = (neg_value_load_hi * poly_neg + (neg_value_load_lo + neg_value_load_lo * poly_neg)) * pow_exp_neg
        neg_exp.set_attributes(tag = "neg_exp", debug = debug_multi)

        result = Addition(
            Subtraction(
                pos_exp,
                neg_exp,
                precision=self.precision,
            ),
            hi_terms,
            precision=self.precision,
            tag="result",
            debug=debug_multi
        )

        # ov_value
        ov_value = round(asinh(self.precision.get_max_value()), self.precision.get_sollya_object(), sollya.RD)
        ov_flag = Comparison(Abs(vx), Constant(ov_value, precision = self.precision), specifier = Comparison.Greater)

        # main scheme
        scheme = Statement(
            Return(
                Select(
                    ov_flag,
                    sign*FP_PlusInfty(self.precision),
                    sign*result
                )))

        return scheme
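
# --- Illustrative sketch (standalone, not part of the scheme above) ---
# Numerical check of the reconstruction described in the comments:
#   sinh(x) = exp(r)*2^(h-1)*2^(l/2^n) - exp(-r)*2^(-h-1)*2^(-l/2^n)
# with k = trunc(x*2^n/log(2)), r = x - k*log(2)/2^n, h = k >> n, l = k mod 2^n.
# Plain double-precision stand-in; the helper name is hypothetical.
import math

def sinh_reconstruction_sketch(x, index_size=5):
    step = math.log(2.0) / 2**index_size
    k = int(x / step)                                 # truncated multiplier
    r = x - k * step                                  # reduced argument, 0 <= r < step
    h, l = k >> index_size, k & (2**index_size - 1)
    pos = 2.0**(l / 2**index_size)                    # tabulated 2^(l/2^n)
    neg = 2.0**(-l / 2**index_size)                   # tabulated 2^(-l/2^n)
    return math.exp(r) * 2.0**(h - 1) * pos - math.exp(-r) * 2.0**(-h - 1) * neg

assert abs(sinh_reconstruction_sketch(3.7) - math.sinh(3.7)) < 1e-12 * math.sinh(3.7)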
Example #24
0
  def generate_scheme(self):
    #func_implementation = CodeFunction(self.function_name, output_format = self.precision)
    vx = self.implementation.add_input_variable("x", self.get_input_precision()) 

    sollya_precision = self.get_sollya_precision()

    # retrieving processor inverse approximation table
    #dummy_var = Variable("dummy", precision = self.precision)
    #dummy_div_seed = DivisionSeed(dummy_var, precision = self.precision)
    #inv_approx_table = self.processor.get_recursive_implementation(dummy_div_seed, language = None, table_getter = lambda self: self.approx_table_map)
    lo_bound_global = SollyaObject(0.0)
    hi_bound_global = SollyaObject(0.75)
    approx_interval = Interval(lo_bound_global, hi_bound_global)
    approx_interval_size = hi_bound_global - lo_bound_global

    # table creation
    table_index_size = 7
    field_index_size = 2
    exp_index_size = table_index_size - field_index_size

    table_size = 2**table_index_size
    table_index_range = range(table_size)

    local_degree = 9
    coeff_table = ML_Table(dimensions = [table_size, local_degree], storage_precision = self.precision)

    #local_interval_size = approx_interval_size / SollyaObject(table_size)
    #for i in table_index_range:
    #  degree = 6
    #  lo_bound = lo_bound_global + i * local_interval_size
    #  hi_bound = lo_bound_global + (i+1) * local_interval_size
    #  approx_interval = Interval(lo_bound, hi_bound)
    #  local_poly_object, local_error = Polynomial.build_from_approximation_with_error(acos(x), degree, [self.precision] * (degree+1), approx_interval, absolute)
    #  local_error = int(log2(sup(abs(local_error / acos(approx_interval)))))
    #  print approx_interval, local_error

    exp_lo = 2**exp_index_size
    for i in table_index_range:
      lo_bound = (1.0 + (i % 2**field_index_size) * S2**-field_index_size) * S2**(i // 2**field_index_size - exp_lo)
      hi_bound = (1.0 + ((i % 2**field_index_size) + 1) * S2**-field_index_size) * S2**(i // 2**field_index_size - exp_lo)
      local_approx_interval = Interval(lo_bound, hi_bound)
      local_poly_object, local_error = Polynomial.build_from_approximation_with_error(acos(1 - x), local_degree, [self.precision] * (local_degree+1), local_approx_interval, sollya.absolute)
      local_error = int(log2(sup(abs(local_error / acos(1 - local_approx_interval)))))
      print(local_approx_interval, local_error)
      for d in range(local_degree):
        coeff_table[i][d] = sollya.coeff(local_poly_object.get_sollya_object(), d)

    table_index = BitLogicRightShift(vx, vx.get_precision().get_field_size() - field_index_size) - (exp_lo << field_index_size)




    print "building mathematical polynomial"
    poly_degree = sup(sollya.guessdegree(acos(x), approx_interval, S2**-(self.precision.get_field_size()))) 
    print "guessed polynomial degree: ", int(poly_degree)
    #global_poly_object = Polynomial.build_from_approximation(log10(1+x)/x, poly_degree, [self.precision]*(poly_degree+1), approx_interval, absolute)

    print "generating polynomial evaluation scheme"
    #_poly = PolynomialSchemeEvaluator.generate_horner_scheme(poly_object, _red_vx, unified_precision = self.precision)

    # building eval error map
    #eval_error_map = {
    #  red_vx: Variable("red_vx", precision = self.precision, interval = red_vx.get_interval()),
    #  log_inv_hi: Variable("log_inv_hi", precision = self.precision, interval = table_high_interval),
    #  log_inv_lo: Variable("log_inv_lo", precision = self.precision, interval = table_low_interval),
    #}
    # computing gappa error
    #poly_eval_error = self.get_eval_error(result, eval_error_map)



    # main scheme
    print "MDL scheme"
    scheme = Statement(Return(vx))
    return scheme
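
# --- Illustrative sketch (standalone, not part of the scheme above) ---
# A table index such as the one built with BitLogicRightShift above can be read
# directly from the binary32 encoding: for positive x, shifting the raw bits
# right by (23 - field_index_size) keeps the biased exponent concatenated with
# the first field_index_size fraction bits (the offset is then subtracted).
# Plain-Python stand-in; the helper name is hypothetical.
import struct

def float_to_segment_index(x, field_index_size=2):
    bits = struct.unpack("<I", struct.pack("<f", x))[0]   # raw binary32 encoding
    return bits >> (23 - field_index_size)                # exponent ++ leading fraction bits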
Example #25
0
def ulp(v, format_):
    """ return a 'unit in last place' value for <v> assuming precision is defined by format _ """
    return sollya.S2**(sollya.ceil(sollya.log2(sollya.abs(v))) -
                       (format_.get_precision() + 1))
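
# Usage sketch, assuming format_.get_precision() returns the mantissa field
# width (52 for binary64): ulp(0.75, ML_Binary64) evaluates to 2^-53, the
# spacing of binary64 values in [0.5, 1).  A plain-float equivalent:
import math

def ulp_sketch(v, mantissa_bits=52):
    return 2.0**(math.ceil(math.log2(abs(v))) - (mantissa_bits + 1))

assert ulp_sketch(0.75) == 2.0**-53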
Example #26
0
    def generate_scheme(self):
        # declaring CodeFunction and retrieving input variable
        vx = self.implementation.add_input_variable("x", self.precision)

        table_size_log = self.table_size_log
        integer_size = 31
        integer_precision = ML_Int32

        max_bound = sup(abs(self.input_intervals[0]))
        max_bound_log = int(ceil(log2(max_bound)))
        Log.report(Log.Info, "max_bound_log=%s " % max_bound_log)
        scaling_power = integer_size - max_bound_log
        Log.report(Log.Info, "scaling power: %s " % scaling_power)

        storage_precision = ML_Custom_FixedPoint_Format(1, 30, signed=True)

        Log.report(Log.Info, "tabulating cosine and sine")
        # cosine and sine fused table
        fused_table = ML_NewTable(
            dimensions=[2**table_size_log, 2],
            storage_precision=storage_precision,
            tag="fast_lib_shared_table")  # self.uniquify_name("cossin_table"))
        # filling table
        for i in range(2**table_size_log):
            local_x = i / S2**table_size_log * S2**max_bound_log

            cos_local = cos(
                local_x
            )  # nearestint(cos(local_x) * S2**storage_precision.get_frac_size())

            sin_local = sin(
                local_x
            )  # nearestint(sin(local_x) * S2**storage_precision.get_frac_size())

            fused_table[i][0] = cos_local
            fused_table[i][1] = sin_local

        # argument reduction evaluation scheme
        # scaling_factor = Constant(S2**scaling_power, precision = self.precision)

        red_vx_precision = ML_Custom_FixedPoint_Format(31 - scaling_power,
                                                       scaling_power,
                                                       signed=True)
        Log.report(
            Log.Verbose, "red_vx_precision.get_c_bit_size()=%d" %
            red_vx_precision.get_c_bit_size())
        # red_vx = NearestInteger(vx * scaling_factor, precision = integer_precision)
        red_vx = Conversion(vx,
                            precision=red_vx_precision,
                            tag="red_vx",
                            debug=debug_fixed32)

        computation_precision = red_vx_precision  # self.precision
        output_precision = self.get_output_precision()
        Log.report(Log.Info,
                   "computation_precision is %s" % computation_precision)
        Log.report(Log.Info, "storage_precision     is %s" % storage_precision)
        Log.report(Log.Info, "output_precision      is %s" % output_precision)

        hi_mask_value = 2**32 - 2**(32 - table_size_log - 1)
        hi_mask = Constant(hi_mask_value, precision=ML_Int32)
        Log.report(Log.Info, "hi_mask=0x%x" % hi_mask_value)

        red_vx_hi_int = BitLogicAnd(TypeCast(red_vx, precision=ML_Int32),
                                    hi_mask,
                                    precision=ML_Int32,
                                    tag="red_vx_hi_int",
                                    debug=debugd)
        red_vx_hi = TypeCast(red_vx_hi_int,
                             precision=red_vx_precision,
                             tag="red_vx_hi",
                             debug=debug_fixed32)
        red_vx_lo = red_vx - red_vx_hi
        red_vx_lo.set_attributes(precision=red_vx_precision,
                                 tag="red_vx_lo",
                                 debug=debug_fixed32)
        table_index = BitLogicRightShift(TypeCast(red_vx, precision=ML_Int32),
                                         scaling_power -
                                         (table_size_log - max_bound_log),
                                         precision=ML_Int32,
                                         tag="table_index",
                                         debug=debugd)

        tabulated_cos = TableLoad(fused_table,
                                  table_index,
                                  0,
                                  tag="tab_cos",
                                  precision=storage_precision,
                                  debug=debug_fixed32)
        tabulated_sin = TableLoad(fused_table,
                                  table_index,
                                  1,
                                  tag="tab_sin",
                                  precision=storage_precision,
                                  debug=debug_fixed32)

        error_function = lambda p, f, ai, mod, t: dirtyinfnorm(f - p, ai)

        Log.report(Log.Info, "building polynomial approximation for cosine")
        # cosine polynomial approximation
        poly_interval = Interval(0, S2**(max_bound_log - table_size_log))
        Log.report(Log.Info, "poly_interval=%s " % poly_interval)
        cos_poly_degree = 2  # int(sup(guessdegree(cos(x), poly_interval, accuracy_goal)))

        Log.report(Log.Verbose, "cosine polynomial approximation")
        cos_poly_object, cos_approx_error = Polynomial.build_from_approximation_with_error(
            cos(sollya.x), [0, 2],
            [0] + [computation_precision.get_bit_size()],
            poly_interval,
            sollya.absolute,
            error_function=error_function)
        #cos_eval_scheme = PolynomialSchemeEvaluator.generate_horner_scheme(cos_poly_object, red_vx_lo, unified_precision = computation_precision)
        Log.report(Log.Info, "cos_approx_error=%e" % cos_approx_error)
        cos_coeff_list = cos_poly_object.get_ordered_coeff_list()
        coeff_C0 = cos_coeff_list[0][1]
        coeff_C2 = Constant(cos_coeff_list[1][1],
                            precision=ML_Custom_FixedPoint_Format(-1,
                                                                  32,
                                                                  signed=True))

        Log.report(Log.Info, "building polynomial approximation for sine")

        # sine polynomial approximation
        sin_poly_degree = 2  # int(sup(guessdegree(sin(x)/x, poly_interval, accuracy_goal)))
        Log.report(Log.Info, "sine poly degree: %e" % sin_poly_degree)
        Log.report(Log.Verbose, "sine polynomial approximation")
        sin_poly_object, sin_approx_error = Polynomial.build_from_approximation_with_error(
            sin(sollya.x) / sollya.x, [0, 2], [0] +
            [computation_precision.get_bit_size()] * (sin_poly_degree + 1),
            poly_interval,
            sollya.absolute,
            error_function=error_function)
        sin_coeff_list = sin_poly_object.get_ordered_coeff_list()
        coeff_S0 = sin_coeff_list[0][1]
        coeff_S2 = Constant(sin_coeff_list[1][1],
                            precision=ML_Custom_FixedPoint_Format(-1,
                                                                  32,
                                                                  signed=True))

        # scheme selection between sine and cosine
        if self.cos_output:
            scheme = self.generate_cos_scheme(computation_precision,
                                              tabulated_cos, tabulated_sin,
                                              coeff_S2, coeff_C2, red_vx_lo)
        else:
            scheme = self.generate_sin_scheme(computation_precision,
                                              tabulated_cos, tabulated_sin,
                                              coeff_S2, coeff_C2, red_vx_lo)

        result = Conversion(scheme, precision=self.get_output_precision())

        Log.report(
            Log.Verbose, "result operation tree :\n %s " % result.get_str(
                display_precision=True, depth=None, memoization_map={}))
        scheme = Statement(Return(result))

        return scheme
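
# --- Illustrative sketch (standalone, not part of the scheme above) ---
# The hi/lo split of red_vx supports an angle-addition reconstruction
#   cos(hi + lo) = cos(hi)*cos(lo) - sin(hi)*sin(lo)
# with cos(hi), sin(hi) tabulated and cos(lo), sin(lo) replaced by short
# polynomials.  Plain double-precision stand-in with a hypothetical granularity.
import math

def cos_table_sketch(x, table_size_log=10):
    step = 1.0 / 2**table_size_log
    hi = math.floor(x / step) * step          # tabulated abscissa
    lo = x - hi                               # residual, 0 <= lo < step
    tab_cos, tab_sin = math.cos(hi), math.sin(hi)
    cos_lo = 1.0 - lo * lo / 2.0              # degree-2 approximation of cos(lo)
    sin_lo = lo                               # degree-1 approximation of sin(lo)
    return tab_cos * cos_lo - tab_sin * sin_lo

assert abs(cos_table_sketch(0.6) - math.cos(0.6)) < 1e-9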
Example #27
0
def get_value_exp(value):
    """ return the binary exponent of value """
    return sollya.ceil(sollya.log2(abs(value)))
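
# Usage sketch: get_value_exp(0.75) evaluates to 0 and get_value_exp(6.0) to 3,
# since ceil(log2(0.75)) == 0 and ceil(log2(6)) == 3 (results are sollya integers).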
Example #28
0
    def generate_scalar_scheme(self, vx):
        abs_vx = Abs(vx, precision=self.precision)

        FCT_LIMIT = 1.0

        one_limit = search_bound_threshold(sollya.erf, FCT_LIMIT, 1.0, 10.0,
                                           self.precision)
        one_limit_exp = int(sollya.floor(sollya.log2(one_limit)))
        Log.report(Log.Debug, "erf(x) = 1.0 limit is {}, with exp={}",
                   one_limit, one_limit_exp)

        upper_approx_bound = 10

        # empirical numbers
        eps_exp = {ML_Binary32: -3, ML_Binary64: -5}[self.precision]
        eps = S2**eps_exp

        Log.report(Log.Info, "building mathematical polynomial")
        approx_interval = Interval(0, eps)
        # function to approximate is erf(x) / x
        # it is an even function: erf(x) / x = erf(-x) / (-x)
        approx_fct = sollya.erf(sollya.x) - (sollya.x)
        poly_degree = int(
            sup(
                guessdegree(approx_fct, approx_interval, S2**
                            -(self.precision.get_field_size() + 5)))) + 1

        poly_degree_list = list(range(1, poly_degree, 2))
        Log.report(Log.Debug, "poly_degree is {} and list {}", poly_degree,
                   poly_degree_list)
        global_poly_object = Polynomial.build_from_approximation(
            approx_fct, poly_degree_list,
            [self.precision] * len(poly_degree_list), approx_interval,
            sollya.relative)
        Log.report(
            Log.Debug, "inform is {}",
            dirtyinfnorm(approx_fct - global_poly_object.get_sollya_object(),
                         approx_interval))
        poly_object = global_poly_object.sub_poly(start_index=1, offset=1)

        ext_precision = {
            ML_Binary32: ML_SingleSingle,
            ML_Binary64: ML_DoubleDouble,
        }[self.precision]

        pre_poly = PolynomialSchemeEvaluator.generate_horner_scheme(
            poly_object, abs_vx, unified_precision=self.precision)

        result = FMA(pre_poly, abs_vx, abs_vx)
        result.set_attributes(tag="result", debug=debug_multi)

        eps_target = S2**-(self.precision.get_field_size() + 5)

        def offset_div_function(fct):
            return lambda offset: fct(sollya.x + offset)

        # empirical numbers
        field_size = {ML_Binary32: 6, ML_Binary64: 8}[self.precision]

        near_indexing = SubFPIndexing(eps_exp, 0, 6, self.precision)
        near_approx = generic_poly_split(offset_div_function(sollya.erf),
                                         near_indexing, eps_target,
                                         self.precision, abs_vx)
        near_approx.set_attributes(tag="near_approx", debug=debug_multi)

        def offset_function(fct):
            return lambda offset: fct(sollya.x + offset)

        medium_indexing = SubFPIndexing(1, one_limit_exp, 7, self.precision)

        medium_approx = generic_poly_split(offset_function(sollya.erf),
                                           medium_indexing, eps_target,
                                           self.precision, abs_vx)
        medium_approx.set_attributes(tag="medium_approx", debug=debug_multi)

        # approximation for positive values
        scheme = ConditionBlock(
            abs_vx < eps, Return(result),
            ConditionBlock(
                abs_vx < near_indexing.get_max_bound(), Return(near_approx),
                ConditionBlock(abs_vx < medium_indexing.get_max_bound(),
                               Return(medium_approx),
                               Return(Constant(1.0,
                                               precision=self.precision)))))
        return scheme
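
# --- Illustrative sketch (standalone, not part of the scheme above) ---
# Near 0 the scheme evaluates erf(x) ~ x + x*p(x), where p approximates
# (erf(x) - x)/x and contains only even powers since erf is odd.  A truncated
# Taylor stand-in in plain Python (coefficients taken from the erf series):
import math

def erf_small_sketch(x):
    s = 2.0 / math.sqrt(math.pi)
    p = (s - 1.0) - (s / 3.0) * x * x + (s / 10.0) * x**4   # even polynomial p(x)
    return x * p + x                                        # FMA-style reconstruction

assert abs(erf_small_sketch(0.02) - math.erf(0.02)) < 1e-12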
Example #29
0
    def generate_scheme(self):
        # declaring target and instantiating optimization engine
        vx = self.implementation.add_input_variable("x", self.precision)

        Log.set_dump_stdout(True)

        Log.report(Log.Info,
                   "\033[33;1m generating implementation scheme \033[0m")
        if self.debug_flag:
            Log.report(Log.Info, "\033[31;1m debug has been enabled \033[0;m")

        # local overloading of RaiseReturn operation
        def ExpRaiseReturn(*args, **kwords):
            kwords["arg_value"] = vx
            kwords["function_name"] = self.function_name
            if self.libm_compliant:
                return RaiseReturn(*args, precision=self.precision, **kwords)
            else:
                return Return(kwords["return_value"], precision=self.precision)

        test_nan_or_inf = Test(vx,
                               specifier=Test.IsInfOrNaN,
                               likely=False,
                               debug=debug_multi,
                               tag="nan_or_inf")
        test_nan = Test(vx,
                        specifier=Test.IsNaN,
                        debug=debug_multi,
                        tag="is_nan_test")
        test_positive = Comparison(vx,
                                   0,
                                   specifier=Comparison.GreaterOrEqual,
                                   debug=debug_multi,
                                   tag="inf_sign")

        test_signaling_nan = Test(vx,
                                  specifier=Test.IsSignalingNaN,
                                  debug=debug_multi,
                                  tag="is_signaling_nan")
        return_snan = Statement(
            ExpRaiseReturn(ML_FPE_Invalid,
                           return_value=FP_QNaN(self.precision)))

        # return in case of infinity input
        infty_return = Statement(
            ConditionBlock(
                test_positive,
                Return(FP_PlusInfty(self.precision), precision=self.precision),
                Return(FP_PlusZero(self.precision), precision=self.precision)))
        # return in case of specific value input (NaN or inf)
        specific_return = ConditionBlock(
            test_nan,
            ConditionBlock(
                test_signaling_nan, return_snan,
                Return(FP_QNaN(self.precision), precision=self.precision)),
            infty_return)
        # return in case of standard (non-special) input

        # exclusion of early overflow and underflow cases
        precision_emax = self.precision.get_emax()
        precision_max_value = S2 * S2**precision_emax
        exp_overflow_bound = sollya.ceil(log(precision_max_value))
        early_overflow_test = Comparison(vx,
                                         exp_overflow_bound,
                                         likely=False,
                                         specifier=Comparison.Greater)
        early_overflow_return = Statement(
            ClearException() if self.libm_compliant else Statement(),
            ExpRaiseReturn(ML_FPE_Inexact,
                           ML_FPE_Overflow,
                           return_value=FP_PlusInfty(self.precision)))

        precision_emin = self.precision.get_emin_subnormal()
        precision_min_value = S2**precision_emin
        exp_underflow_bound = floor(log(precision_min_value))

        early_underflow_test = Comparison(vx,
                                          exp_underflow_bound,
                                          likely=False,
                                          specifier=Comparison.Less)
        early_underflow_return = Statement(
            ClearException() if self.libm_compliant else Statement(),
            ExpRaiseReturn(ML_FPE_Inexact,
                           ML_FPE_Underflow,
                           return_value=FP_PlusZero(self.precision)))

        # constant computation
        invlog2 = self.precision.round_sollya_object(1 / log(2), sollya.RN)

        interval_vx = Interval(exp_underflow_bound, exp_overflow_bound)
        interval_fk = interval_vx * invlog2
        interval_k = Interval(floor(inf(interval_fk)),
                              sollya.ceil(sup(interval_fk)))

        log2_hi_precision = self.precision.get_field_size() - (
            sollya.ceil(log2(sup(abs(interval_k)))) + 2)
        Log.report(Log.Info, "log2_hi_precision: %d" % log2_hi_precision)
        invlog2_cst = Constant(invlog2, precision=self.precision)
        log2_hi = round(log(2), log2_hi_precision, sollya.RN)
        log2_lo = self.precision.round_sollya_object(
            log(2) - log2_hi, sollya.RN)

        # argument reduction
        unround_k = vx * invlog2
        unround_k.set_attributes(tag="unround_k", debug=debug_multi)
        k = NearestInteger(unround_k,
                           precision=self.precision,
                           debug=debug_multi)
        ik = NearestInteger(unround_k,
                            precision=self.precision.get_integer_format(),
                            debug=debug_multi,
                            tag="ik")
        ik.set_tag("ik")
        k.set_tag("k")
        exact_pre_mul = (k * log2_hi)
        exact_pre_mul.set_attributes(exact=True)
        exact_hi_part = vx - exact_pre_mul
        exact_hi_part.set_attributes(exact=True,
                                     tag="exact_hi",
                                     debug=debug_multi,
                                     prevent_optimization=True)
        exact_lo_part = -k * log2_lo
        exact_lo_part.set_attributes(tag="exact_lo",
                                     debug=debug_multi,
                                     prevent_optimization=True)
        r = exact_hi_part + exact_lo_part
        r.set_tag("r")
        r.set_attributes(debug=debug_multi)

        approx_interval = Interval(-log(2) / 2, log(2) / 2)

        approx_interval_half = approx_interval / 2
        approx_interval_split = [
            Interval(-log(2) / 2, inf(approx_interval_half)),
            approx_interval_half,
            Interval(sup(approx_interval_half),
                     log(2) / 2)
        ]

        # TODO: should be computed automatically
        exact_hi_interval = approx_interval
        exact_lo_interval = -interval_k * log2_lo

        opt_r = self.optimise_scheme(r, copy={})

        tag_map = {}
        self.opt_engine.register_nodes_by_tag(opt_r, tag_map)

        cg_eval_error_copy_map = {
            vx:
            Variable("x", precision=self.precision, interval=interval_vx),
            tag_map["k"]:
            Variable("k", interval=interval_k, precision=self.precision)
        }

        #try:
        if is_gappa_installed():
            eval_error = self.gappa_engine.get_eval_error_v2(
                self.opt_engine,
                opt_r,
                cg_eval_error_copy_map,
                gappa_filename="red_arg.g")
        else:
            eval_error = 0.0
            Log.report(Log.Warning,
                       "gappa is not installed in this environnement")
        Log.report(Log.Info, "eval error: %s" % eval_error)

        local_ulp = sup(ulp(sollya.exp(approx_interval), self.precision))
        # FIXME refactor error_goal from accuracy
        Log.report(Log.Info, "accuracy: %s" % self.accuracy)
        if isinstance(self.accuracy, ML_Faithful):
            error_goal = local_ulp
        elif isinstance(self.accuracy, ML_CorrectlyRounded):
            error_goal = S2**-1 * local_ulp
        elif isinstance(self.accuracy, ML_DegradedAccuracyAbsolute):
            error_goal = self.accuracy.goal
        elif isinstance(self.accuracy, ML_DegradedAccuracyRelative):
            error_goal = self.accuracy.goal
        else:
            Log.report(Log.Error, "unknown accuracy: %s" % self.accuracy)

        # error_goal = local_ulp #S2**-(self.precision.get_field_size()+1)
        error_goal_approx = S2**-1 * error_goal

        Log.report(Log.Info,
                   "\033[33;1m building mathematical polynomial \033[0m\n")
        poly_degree = max(
            sup(
                guessdegree(
                    expm1(sollya.x) / sollya.x, approx_interval,
                    error_goal_approx)) - 1, 2)
        init_poly_degree = poly_degree

        error_function = lambda p, f, ai, mod, t: dirtyinfnorm(f - p, ai)

        polynomial_scheme_builder = PolynomialSchemeEvaluator.generate_estrin_scheme
        #polynomial_scheme_builder = PolynomialSchemeEvaluator.generate_horner_scheme

        while 1:
            Log.report(Log.Info, "attempting poly degree: %d" % poly_degree)
            precision_list = [1] + [self.precision] * (poly_degree)
            poly_object, poly_approx_error = Polynomial.build_from_approximation_with_error(
                expm1(sollya.x),
                poly_degree,
                precision_list,
                approx_interval,
                sollya.absolute,
                error_function=error_function)
            Log.report(Log.Info, "polynomial: %s " % poly_object)
            sub_poly = poly_object.sub_poly(start_index=2)
            Log.report(Log.Info, "polynomial: %s " % sub_poly)

            Log.report(Log.Info, "poly approx error: %s" % poly_approx_error)

            Log.report(
                Log.Info,
                "\033[33;1m generating polynomial evaluation scheme \033[0m")
            pre_poly = polynomial_scheme_builder(
                poly_object, r, unified_precision=self.precision)
            pre_poly.set_attributes(tag="pre_poly", debug=debug_multi)

            pre_sub_poly = polynomial_scheme_builder(
                sub_poly, r, unified_precision=self.precision)
            pre_sub_poly.set_attributes(tag="pre_sub_poly", debug=debug_multi)

            poly = 1 + (exact_hi_part + (exact_lo_part + pre_sub_poly))
            poly.set_tag("poly")

            # optimizing poly before evaluation error computation
            #opt_poly = self.opt_engine.optimization_process(poly, self.precision, fuse_fma = fuse_fma)
            #opt_sub_poly = self.opt_engine.optimization_process(pre_sub_poly, self.precision, fuse_fma = fuse_fma)
            opt_poly = self.optimise_scheme(poly)
            opt_sub_poly = self.optimise_scheme(pre_sub_poly)

            # evaluating error of the polynomial approximation
            r_gappa_var = Variable("r",
                                   precision=self.precision,
                                   interval=approx_interval)
            exact_hi_gappa_var = Variable("exact_hi",
                                          precision=self.precision,
                                          interval=exact_hi_interval)
            exact_lo_gappa_var = Variable("exact_lo",
                                          precision=self.precision,
                                          interval=exact_lo_interval)
            vx_gappa_var = Variable("x",
                                    precision=self.precision,
                                    interval=interval_vx)
            k_gappa_var = Variable("k",
                                   interval=interval_k,
                                   precision=self.precision)

            #print "exact_hi interval: ", exact_hi_interval

            sub_poly_error_copy_map = {
                #r.get_handle().get_node(): r_gappa_var,
                #vx.get_handle().get_node():  vx_gappa_var,
                exact_hi_part.get_handle().get_node():
                exact_hi_gappa_var,
                exact_lo_part.get_handle().get_node():
                exact_lo_gappa_var,
                #k.get_handle().get_node(): k_gappa_var,
            }

            poly_error_copy_map = {
                exact_hi_part.get_handle().get_node(): exact_hi_gappa_var,
                exact_lo_part.get_handle().get_node(): exact_lo_gappa_var,
            }

            if is_gappa_installed():
                sub_poly_eval_error = -1.0
                sub_poly_eval_error = self.gappa_engine.get_eval_error_v2(
                    self.opt_engine,
                    opt_sub_poly,
                    sub_poly_error_copy_map,
                    gappa_filename="%s_gappa_sub_poly.g" % self.function_name)

                dichotomy_map = [
                    {
                        exact_hi_part.get_handle().get_node():
                        approx_interval_split[0],
                    },
                    {
                        exact_hi_part.get_handle().get_node():
                        approx_interval_split[1],
                    },
                    {
                        exact_hi_part.get_handle().get_node():
                        approx_interval_split[2],
                    },
                ]
                poly_eval_error_dico = self.gappa_engine.get_eval_error_v3(
                    self.opt_engine,
                    opt_poly,
                    poly_error_copy_map,
                    gappa_filename="gappa_poly.g",
                    dichotomy=dichotomy_map)

                poly_eval_error = max(
                    [sup(abs(err)) for err in poly_eval_error_dico])
            else:
                poly_eval_error = 0.0
                sub_poly_eval_error = 0.0
                Log.report(Log.Warning,
                           "gappa is not installed in this environnement")
                Log.report(Log.Info, "stopping autonomous degree research")
                # increment the polynomial degree to counteract the initial decrement
                poly_degree += 1
                break
            Log.report(Log.Info, "poly evaluation error: %s" % poly_eval_error)
            Log.report(Log.Info,
                       "sub poly evaluation error: %s" % sub_poly_eval_error)

            global_poly_error = None
            global_rel_poly_error = None

            for case_index in range(3):
                poly_error = poly_approx_error + poly_eval_error_dico[
                    case_index]
                rel_poly_error = sup(
                    abs(poly_error /
                        sollya.exp(approx_interval_split[case_index])))
                if global_rel_poly_error is None or rel_poly_error > global_rel_poly_error:
                    global_rel_poly_error = rel_poly_error
                    global_poly_error = poly_error
            flag = error_goal > global_rel_poly_error

            if flag:
                break
            else:
                poly_degree += 1

        late_overflow_test = Comparison(ik,
                                        self.precision.get_emax(),
                                        specifier=Comparison.Greater,
                                        likely=False,
                                        debug=debug_multi,
                                        tag="late_overflow_test")
        overflow_exp_offset = (self.precision.get_emax() -
                               self.precision.get_field_size() / 2)
        diff_k = Subtraction(
            ik,
            Constant(overflow_exp_offset,
                     precision=self.precision.get_integer_format()),
            precision=self.precision.get_integer_format(),
            debug=debug_multi,
            tag="diff_k",
        )
        late_overflow_result = (ExponentInsertion(
            diff_k, precision=self.precision) * poly) * ExponentInsertion(
                overflow_exp_offset, precision=self.precision)
        late_overflow_result.set_attributes(silent=False,
                                            tag="late_overflow_result",
                                            debug=debug_multi,
                                            precision=self.precision)
        late_overflow_return = ConditionBlock(
            Test(late_overflow_result, specifier=Test.IsInfty, likely=False),
            ExpRaiseReturn(ML_FPE_Overflow,
                           return_value=FP_PlusInfty(self.precision)),
            Return(late_overflow_result, precision=self.precision))

        late_underflow_test = Comparison(k,
                                         self.precision.get_emin_normal(),
                                         specifier=Comparison.LessOrEqual,
                                         likely=False)
        underflow_exp_offset = 2 * self.precision.get_field_size()
        corrected_exp = Addition(
            ik,
            Constant(underflow_exp_offset,
                     precision=self.precision.get_integer_format()),
            precision=self.precision.get_integer_format(),
            tag="corrected_exp")
        late_underflow_result = (
            ExponentInsertion(corrected_exp, precision=self.precision) *
            poly) * ExponentInsertion(-underflow_exp_offset,
                                      precision=self.precision)
        late_underflow_result.set_attributes(debug=debug_multi,
                                             tag="late_underflow_result",
                                             silent=False)
        test_subnormal = Test(late_underflow_result,
                              specifier=Test.IsSubnormal)
        late_underflow_return = Statement(
            ConditionBlock(
                test_subnormal,
                ExpRaiseReturn(ML_FPE_Underflow,
                               return_value=late_underflow_result)),
            Return(late_underflow_result, precision=self.precision))

        twok = ExponentInsertion(ik,
                                 tag="exp_ik",
                                 debug=debug_multi,
                                 precision=self.precision)
        #std_result = twok * ((1 + exact_hi_part * pre_poly) + exact_lo_part * pre_poly)
        std_result = twok * poly
        std_result.set_attributes(tag="std_result", debug=debug_multi)
        result_scheme = ConditionBlock(
            late_overflow_test, late_overflow_return,
            ConditionBlock(late_underflow_test, late_underflow_return,
                           Return(std_result, precision=self.precision)))
        std_return = ConditionBlock(
            early_overflow_test, early_overflow_return,
            ConditionBlock(early_underflow_test, early_underflow_return,
                           result_scheme))

        # main scheme
        Log.report(Log.Info, "\033[33;1m MDL scheme \033[0m")
        scheme = ConditionBlock(
            test_nan_or_inf,
            Statement(ClearException() if self.libm_compliant else Statement(),
                      specific_return), std_return)

        return scheme
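
# --- Illustrative sketch (standalone, not part of the scheme above) ---
# The reduction writes x = k*log(2) + r, with log(2) split into a high part that
# has few enough significant bits for k*log2_hi to be exact, plus a low part;
# then exp(x) = 2^k * exp(r).  Plain double-precision stand-in; the 32-bit split
# below is a hypothetical choice.
import math

_ln2 = math.log(2.0)
_log2_hi = math.ldexp(round(math.ldexp(_ln2, 32)), -32)   # top ~32 bits of log(2)
_log2_lo = _ln2 - _log2_hi                                # exact residual

def exp_reduction_sketch(x):
    k = round(x / _ln2)
    r = (x - k * _log2_hi) - k * _log2_lo     # reduced argument, roughly [-ln2/2, ln2/2]
    return math.ldexp(math.exp(r), k)         # 2^k * exp(r)

assert abs(exp_reduction_sketch(12.3) - math.exp(12.3)) < 1e-12 * math.exp(12.3)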
Example #30
0
    def generate_scheme(self):
        # declaring CodeFunction and retrieving input variable
        vx = self.implementation.add_input_variable("x", self.precision)

        Log.report(Log.Info, "generating implementation scheme")
        if self.debug_flag:
            Log.report(Log.Info, "debug has been enabled")

        # local overloading of RaiseReturn operation
        def SincosRaiseReturn(*args, **kwords):
            kwords["arg_value"] = vx
            kwords["function_name"] = self.function_name
            return RaiseReturn(*args, **kwords)

        sollya_precision = self.precision.get_sollya_object()
        hi_precision = self.precision.get_field_size() - 8
        cw_hi_precision = self.precision.get_field_size() - 4

        ext_precision = {
            ML_Binary32: ML_Binary64,
            ML_Binary64: ML_Binary64
        }[self.precision]

        int_precision = {
            ML_Binary32: ML_Int32,
            ML_Binary64: ML_Int64
        }[self.precision]

        if self.precision is ML_Binary32:
            ph_bound = S2**10
        else:
            ph_bound = S2**33

        test_ph_bound = Comparison(vx,
                                   ph_bound,
                                   specifier=Comparison.GreaterOrEqual,
                                   precision=ML_Bool,
                                   likely=False)

        # argument reduction
        # m
        frac_pi_index = {ML_Binary32: 10, ML_Binary64: 14}[self.precision]

        C0 = Constant(0, precision=int_precision)
        C1 = Constant(1, precision=int_precision)
        C_offset = Constant(3 * S2**(frac_pi_index - 1),
                            precision=int_precision)

        # 2^m / pi
        frac_pi = round(S2**frac_pi_index / pi, cw_hi_precision, sollya.RN)
        frac_pi_lo = round(S2**frac_pi_index / pi - frac_pi, sollya_precision,
                           sollya.RN)
        # pi / 2^m, high part
        inv_frac_pi = round(pi / S2**frac_pi_index, cw_hi_precision, sollya.RN)
        # pi / 2^m, low part
        inv_frac_pi_lo = round(pi / S2**frac_pi_index - inv_frac_pi,
                               sollya_precision, sollya.RN)

        # computing k
        vx.set_attributes(tag="vx", debug=debug_multi)

        vx_pi = Addition(Multiplication(vx,
                                        Constant(frac_pi,
                                                 precision=self.precision),
                                        precision=self.precision),
                         Multiplication(vx,
                                        Constant(frac_pi_lo,
                                                 precision=self.precision),
                                        precision=self.precision),
                         precision=self.precision,
                         tag="vx_pi",
                         debug=debug_multi)

        k = NearestInteger(vx_pi,
                           precision=int_precision,
                           tag="k",
                           debug=debug_multi)
        # k in floating-point precision
        fk = Conversion(k,
                        precision=self.precision,
                        tag="fk",
                        debug=debug_multi)

        inv_frac_pi_cst = Constant(inv_frac_pi,
                                   tag="inv_frac_pi",
                                   precision=self.precision,
                                   debug=debug_multi)
        inv_frac_pi_lo_cst = Constant(inv_frac_pi_lo,
                                      tag="inv_frac_pi_lo",
                                      precision=self.precision,
                                      debug=debug_multi)

        # Cody-Waite reduction
        red_coeff1 = Multiplication(fk,
                                    inv_frac_pi_cst,
                                    precision=self.precision,
                                    exact=True)
        red_coeff2 = Multiplication(Negation(fk, precision=self.precision),
                                    inv_frac_pi_lo_cst,
                                    precision=self.precision,
                                    exact=True)

        # Should be exact / Sterbenz' Lemma
        pre_sub_mul = Subtraction(vx,
                                  red_coeff1,
                                  precision=self.precision,
                                  exact=True)

        # Fast2Sum
        s = Addition(pre_sub_mul,
                     red_coeff2,
                     precision=self.precision,
                     unbreakable=True,
                     tag="s",
                     debug=debug_multi)
        z = Subtraction(s,
                        pre_sub_mul,
                        precision=self.precision,
                        unbreakable=True,
                        tag="z",
                        debug=debug_multi)
        t = Subtraction(red_coeff2,
                        z,
                        precision=self.precision,
                        unbreakable=True,
                        tag="t",
                        debug=debug_multi)
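        # Fast2Sum recovery: assuming |pre_sub_mul| >= |red_coeff2|, z is the part
        # of red_coeff2 already absorbed into s, and t is the rounding error of the
        # addition, so the pair (s, t) represents pre_sub_mul + red_coeff2 exactly.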

        red_vx_std = Addition(s, t, precision=self.precision)
        red_vx_std.set_attributes(tag="red_vx_std", debug=debug_multi)

        # To compute sine we offset x by 3pi/2,
        # which means adding 3 * S2^(frac_pi_index-1) to k
        if self.sin_output:
            Log.report(Log.Info, "Computing Sin")
            offset_k = Addition(k,
                                C_offset,
                                precision=int_precision,
                                tag="offset_k")
        else:
            Log.report(Log.Info, "Computing Cos")
            offset_k = k

        modk = Variable("modk",
                        precision=int_precision,
                        var_type=Variable.Local)
        red_vx = Variable("red_vx",
                          precision=self.precision,
                          var_type=Variable.Local)

        # Faster modulo using bitwise logic
        modk_std = BitLogicAnd(offset_k,
                               2**(frac_pi_index + 1) - 1,
                               precision=int_precision,
                               tag="modk",
                               debug=debug_multi)
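        # note: masking with (2**(frac_pi_index + 1) - 1) yields the value
        # modulo 2**(frac_pi_index + 1), which is the faster modulo mentioned above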

        approx_interval = Interval(-pi / (S2**(frac_pi_index + 1)),
                                   pi / S2**(frac_pi_index + 1))

        red_vx.set_interval(approx_interval)

        Log.report(Log.Info, "approx interval: %s\n" % approx_interval)

        Log.report(Log.Info,
                   "building tabulated approximation for sin and cos")

        error_function = lambda p, f, ai, mod, t: dirtyinfnorm(f - p, ai)

        # polynomial_scheme_builder = PolynomialSchemeEvaluator.generate_estrin_scheme
        polynomial_scheme_builder = PolynomialSchemeEvaluator.generate_horner_scheme

        table_index_size = frac_pi_index + 1
        cos_table = ML_NewTable(dimensions=[2**table_index_size, 1],
                                storage_precision=self.precision,
                                tag=self.uniquify_name("cos_table"))

        for i in range(2**(frac_pi_index + 1)):
            local_x = i * pi / S2**frac_pi_index
            cos_local = round(cos(local_x), self.precision.get_sollya_object(),
                              sollya.RN)
            cos_table[i][0] = cos_local

        sin_index = Modulo(modk + 2**(frac_pi_index - 1),
                           2**(frac_pi_index + 1),
                           precision=int_precision,
                           tag="sin_index")  #, debug = debug_multi)
        tabulated_cos = TableLoad(cos_table,
                                  modk,
                                  C0,
                                  precision=self.precision,
                                  tag="tab_cos",
                                  debug=debug_multi)
        tabulated_sin = -TableLoad(cos_table,
                                   sin_index,
                                   C0,
                                   precision=self.precision,
                                   tag="tab_sin",
                                   debug=debug_multi)

        poly_degree_cos = sup(
            guessdegree(cos(sollya.x), approx_interval, S2**
                        -self.precision.get_precision()) + 2)
        poly_degree_sin = sup(
            guessdegree(
                sin(sollya.x) / sollya.x, approx_interval, S2**
                -self.precision.get_precision()) + 2)

        poly_degree_cos_list = range(0, int(poly_degree_cos) + 3)
        poly_degree_sin_list = range(0, int(poly_degree_sin) + 3)

        # cosine polynomial: limiting first and second coefficient precision to 1-bit
        poly_cos_prec_list = [self.precision] * len(poly_degree_cos_list)
        # sine polynomial: limiting first coefficient precision to 1-bit
        poly_sin_prec_list = [self.precision] * len(poly_degree_sin_list)

        error_function = lambda p, f, ai, mod, t: dirtyinfnorm(f - p, ai)
        Log.report(Log.Info,
                   "building mathematical polynomials for sin and cos")
        # Polynomial approximations
        Log.report(Log.Info, "cos")
        poly_object_cos, poly_error_cos = Polynomial.build_from_approximation_with_error(
            cos(sollya.x),
            poly_degree_cos_list,
            poly_cos_prec_list,
            approx_interval,
            sollya.absolute,
            error_function=error_function)
        Log.report(Log.Info, "sin")
        poly_object_sin, poly_error_sin = Polynomial.build_from_approximation_with_error(
            sin(sollya.x),
            poly_degree_sin_list,
            poly_sin_prec_list,
            approx_interval,
            sollya.absolute,
            error_function=error_function)

        Log.report(
            Log.Info, "poly error cos: {} / {:d}".format(
                poly_error_cos, int(sollya.log2(poly_error_cos))))
        Log.report(
            Log.Info, "poly error sin: {0} / {1:d}".format(
                poly_error_sin, int(sollya.log2(poly_error_sin))))
        Log.report(Log.Info, "poly cos : %s" % poly_object_cos)
        Log.report(Log.Info, "poly sin : %s" % poly_object_sin)

        # Polynomial evaluation scheme
        poly_cos = polynomial_scheme_builder(
            poly_object_cos.sub_poly(start_index=1),
            red_vx,
            unified_precision=self.precision)
        poly_sin = polynomial_scheme_builder(
            poly_object_sin.sub_poly(start_index=2),
            red_vx,
            unified_precision=self.precision)
        poly_cos.set_attributes(tag="poly_cos", debug=debug_multi)
        poly_sin.set_attributes(tag="poly_sin",
                                debug=debug_multi,
                                unbreakable=True)

        # TwoProductFMA
        mul_cos_x = tabulated_cos * poly_cos
        mul_cos_y = FusedMultiplyAdd(tabulated_cos,
                                     poly_cos,
                                     -mul_cos_x,
                                     precision=self.precision)

        mul_sin_x = tabulated_sin * poly_sin
        mul_sin_y = FusedMultiplyAdd(tabulated_sin,
                                     poly_sin,
                                     -mul_sin_x,
                                     precision=self.precision)

        mul_coeff_sin_hi = tabulated_sin * red_vx
        mul_coeff_sin_lo = FusedMultiplyAdd(tabulated_sin, red_vx,
                                            -mul_coeff_sin_hi)

        mul_cos = Addition(mul_cos_x,
                           mul_cos_y,
                           precision=self.precision,
                           tag="mul_cos")  #, debug = debug_multi)
        mul_sin = Negation(Addition(mul_sin_x,
                                    mul_sin_y,
                                    precision=self.precision),
                           precision=self.precision,
                           tag="mul_sin")  #, debug = debug_multi)
        mul_coeff_sin = Negation(Addition(mul_coeff_sin_hi,
                                          mul_coeff_sin_lo,
                                          precision=self.precision),
                                 precision=self.precision,
                                 tag="mul_coeff_sin")  #, debug = debug_multi)

        mul_cos_x.set_attributes(
            tag="mul_cos_x", precision=self.precision)  #, debug = debug_multi)
        mul_cos_y.set_attributes(
            tag="mul_cos_y", precision=self.precision)  #, debug = debug_multi)
        mul_sin_x.set_attributes(
            tag="mul_sin_x", precision=self.precision)  #, debug = debug_multi)
        mul_sin_y.set_attributes(
            tag="mul_sin_y", precision=self.precision)  #, debug = debug_multi)

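        # reconstruction: with a the tabulated angle and r = red_vx,
        #   cos_eval_d_1 ~ cos(a) * (1 + poly_cos) - sin(a) * (r + poly_sin)
        # which matches cos(a + r) = cos(a) * cos(r) - sin(a) * sin(r) once
        # cos(r) ~ 1 + poly_cos and sin(r) ~ r + poly_sin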
        cos_eval_d_1 = (((mul_cos + mul_sin) + mul_coeff_sin) + tabulated_cos)

        cos_eval_d_1.set_attributes(tag="cos_eval_d_1",
                                    precision=self.precision,
                                    debug=debug_multi)

        result_1 = Statement(Return(cos_eval_d_1))

        #######################################################################
        #                    LARGE ARGUMENT MANAGEMENT                        #
        #                 (lar: Large Argument Reduction)                     #
        #######################################################################
        # Payne-Hanek argument reduction for large arguments
        ph_k = frac_pi_index
        ph_frac_pi = round(S2**ph_k / pi, 1500, sollya.RN)
        ph_inv_frac_pi = pi / S2**ph_k

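        # generate_payne_hanek presumably returns the reduction computation
        # itself (ph_statement), the reduced fractional accumulator (ph_acc)
        # and the integer part of the quotient (ph_acc_int)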
        ph_statement, ph_acc, ph_acc_int = generate_payne_hanek(vx,
                                                                ph_frac_pi,
                                                                self.precision,
                                                                n=100,
                                                                k=ph_k)

        # assigning Large Argument Reduction reduced variable
        lar_vx = Variable("lar_vx",
                          precision=self.precision,
                          var_type=Variable.Local)

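        # scale the Payne-Hanek remainder back to radians: inv_frac_pi and
        # inv_frac_pi_lo are presumably the hi/lo split of pi / 2**k defined
        # earlier, so lar_red_vx ~ lar_vx * pi / 2**k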
        lar_red_vx = Addition(Multiplication(lar_vx,
                                             inv_frac_pi,
                                             precision=self.precision),
                              Multiplication(lar_vx,
                                             inv_frac_pi_lo,
                                             precision=self.precision),
                              precision=self.precision,
                              tag="lar_red_vx",
                              debug=debug_multi)

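        # wrap a negative integer quotient back into [0, 2**(ph_k + 1)) before
        # the masking below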
        C32 = Constant(2**(ph_k + 1), precision=int_precision, tag="C32")
        ph_acc_int_red = Select(ph_acc_int < C0,
                                C32 + ph_acc_int,
                                ph_acc_int,
                                precision=int_precision,
                                tag="ph_acc_int_red")
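        # for sin, shift the index by C_offset, presumably so that sin(x) is
        # evaluated through the cosine path of the table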
        if self.sin_output:
            lar_offset_k = Addition(ph_acc_int_red,
                                    C_offset,
                                    precision=int_precision,
                                    tag="lar_offset_k")
        else:
            lar_offset_k = ph_acc_int_red

        ph_acc_int_red.set_attributes(tag="ph_acc_int_red", debug=debug_multi)
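        # keep only the low frac_pi_index + 1 bits of the offset index,
        # presumably the table / quadrant selector used during reconstruction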
        lar_modk = BitLogicAnd(lar_offset_k,
                               2**(frac_pi_index + 1) - 1,
                               precision=int_precision,
                               tag="lar_modk",
                               debug=debug_multi)

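        # large-argument path: run the Payne-Hanek reduction and overwrite the
        # shared red_vx / modk variables with its results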
        lar_statement = Statement(ph_statement,
                                  ReferenceAssign(lar_vx,
                                                  ph_acc,
                                                  debug=debug_multi),
                                  ReferenceAssign(red_vx,
                                                  lar_red_vx,
                                                  debug=debug_multi),
                                  ReferenceAssign(modk, lar_modk),
                                  prevent_optimization=True)

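        # special inputs: infinities and NaNs return a quiet NaN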
        test_NaN_or_Inf = Test(vx,
                               specifier=Test.IsInfOrNaN,
                               likely=False,
                               tag="NaN_or_Inf",
                               debug=debug_multi)
        return_NaN_or_Inf = Statement(Return(FP_QNaN(self.precision)))

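        # overall scheme: handle NaN/Inf first, then pick the large-argument
        # reduction when test_ph_bound holds (otherwise the standard
        # reduction), and finally evaluate result_1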
        scheme = ConditionBlock(
            test_NaN_or_Inf, Statement(ClearException(), return_NaN_or_Inf),
            Statement(
                modk, red_vx,
                ConditionBlock(
                    test_ph_bound, lar_statement,
                    Statement(
                        ReferenceAssign(modk, modk_std),
                        ReferenceAssign(red_vx, red_vx_std),
                    )), result_1))

        return scheme