Example #1
def fast_etienne_likelihood(mod, params, kda=None, kda_x=None):
    '''
    same as Abundance inner function, but takes advantage of constant
    m value when varying only theta.

    :argument mod: Abundance object
    :argument params: list containing theta and m
    :argument kda: unused here (the function reads mod._kda directly)
    :argument kda_x: precomputed list of exp(kda + ind*immig)
    '''
    theta     = params[0]
    immig     = float (params[1]) / (1 - params[1]) * (mod.community.J - 1)
    log_immig = log (immig)
    theta_s   = theta + mod.community.S
    if not kda_x:
        kda_x = [exp(mod._kda[val] + val * log_immig) for val in \
                 xrange (mod.community.J - mod.community.S)]
    poch1 = exp (mod._factor + log (theta) * mod.community.S - \
                 lpoch (immig, mod.community.J) + \
                 log_immig * mod.community.S + lngamma(theta))
    gam_theta_s = gamma (theta_s)
    lik = mpfr(0.0)
    for val in xrange (mod.community.J - mod.community.S):
        lik += poch1 * kda_x[val] / gam_theta_s
        gam_theta_s *= theta_s + val
    return ((-log (lik)), kda_x)
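
Intended call pattern, as a sketch based on the docstring above: build kda_x once, then reuse it across likelihood evaluations that vary only theta (mod and the parameter values are placeholders).

# first call builds the exp(kda + i*log_immig) table, later calls reuse it
# lnl1, kda_x = fast_etienne_likelihood(mod, [theta1, m], kda_x=None)
# lnl2, _     = fast_etienne_likelihood(mod, [theta2, m], kda_x=kda_x)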
Example #2
 def likelihood(self, params):
     '''
     log-likelihood function
     
     :argument params: a list of 2 parameters:
     
       * theta = params[0]
       * m     = params[1]
     :returns: log likelihood of given theta and I
     
     '''
     kda       = self._kda
     theta     = params[0]
     immig     = float(params[1]) / (1 - params[1]) * (self.community.J - 1)
     log_immig = log(immig)
     theta_s   = theta + self.community.S
     poch1 = exp(self._factor + log(theta) * self.community.S - \
                 lpoch(immig, self.community.J) + \
                 log_immig * self.community.S + lngamma(theta))
     gam_theta_s = gamma(theta_s)
     lik = mpfr(0.0)
     for abd in xrange(int(self.community.J - self.community.S)):
         lik += poch1 * exp(kda[abd] + abd * log_immig) / gam_theta_s
         gam_theta_s *= theta_s + abd
     return -log(lik)
Example #3
def exponential_mechanism_big(data,
                              domain,
                              quality_function,
                              eps,
                              bulk=False,
                              for_sparse=False):
    """Exponential Mechanism that can deal with very large or very small qualities
    exponential_mechanism ( data , domain , quality function , privacy parameter )
    :param data: list or array of values
    :param domain: list of possible results
    :param quality_function: function which get as input the data and a domain element and 'qualifies' it
    :param eps: privacy parameter
    :param bulk: in case that we can reduce run-time by evaluating the quality of the whole domain in bulk,
    the procedure will be given a 'bulk' quality function. meaning that instead of one domain element the
    quality function get the whole domain as input
    :param for_sparse: in cases that the domain is a very spared one, namely a big percent of the domain has quality 0,
    there is a special procedure called sparse_domain. That procedure needs, beside that result from the given
    mechanism, the total weight of the domain whose quality is more than 0. If that is the case Exponential-Mechanism
    will return also the P DF before the normalization.
    :return: an element of domain with approximately maximum value of quality function
    """

    # calculate a list of probabilities for each element in the domain D
    # probability of element d in domain proportional to exp(eps*quality(data,d)/2)
    if bulk:
        qualified_domain = quality_function(data, domain)
        domain_pdf = [gmpy2.exp(eps * q / 2) for q in qualified_domain]
    else:
        domain_pdf = [
            gmpy2.exp(eps * quality_function(data, d) / 2) for d in domain
        ]
    total_value = sum(domain_pdf)
    domain_pdf = [d / total_value for d in domain_pdf]
    normalizer = sum(domain_pdf)
    # for debugging and other reasons: check that domain_pdf indeed defines a distribution
    # use the uniform distribution (from 0 to 1) to pick an element by the CDF
    if abs(normalizer - 1) > 0.001:
        raise ValueError('ERR: exponential_mechanism, sum(domain_pdf) != 1.')

    # accumulate elements to get the CDF of the exponential distribution
    domain_cdf = np.cumsum(domain_pdf).tolist()
    # pick a uniformly random value on the CDF
    pick = np.random.uniform()

    # return the element corresponding to the pick;
    # take the min of the index and len(D)-1 to avoid an out-of-bounds index
    result = domain[min(np.searchsorted(domain_cdf, pick), len(domain) - 1)]
    # in exponential_mechanism_sparse we need also the total_sum value
    if for_sparse:
        return result, total_value
    return result
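
A minimal usage sketch for the mechanism above; the toy data and the count-based quality function are hypothetical stand-ins:

import numpy as np
import gmpy2

votes = ['a', 'b', 'b', 'c', 'b']
domain = ['a', 'b', 'c']
quality = lambda data, d: data.count(d)   # toy quality: how often d appears
winner = exponential_mechanism_big(votes, domain, quality, eps=0.5)
print(winner)                             # 'b' with the highest probability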
Example #4
    def lslmsr(q_, i, a):
        assert q_[i] + a >= 0
        q = q_[:]
        alpha = 1/16.
        scale = 12
        Bq1 = alpha*scale*sum(q)
        C1 = Bq1*gmpy2.log(sum(gmpy2.exp(q_i/Bq1) for q_i in q))
        q[i] += a
        Bq2 = alpha*scale*sum(q)
        C2 = Bq2*gmpy2.log(sum(gmpy2.exp(q_i/Bq2) for q_i in q))

        if a > 0:
            return (C2 - C1)*1.015625
        else:
            return (C1 - C2)*0.984375
Example #5
    def lslmsr(q_, i, a):
        assert q_[i] + a >= 0
        q = q_[:]
        alpha = 1 / 16.
        scale = 12
        Bq1 = alpha * scale * sum(q)
        C1 = Bq1 * gmpy2.log(sum(gmpy2.exp(q_i / Bq1) for q_i in q))
        q[i] += a
        Bq2 = alpha * scale * sum(q)
        C2 = Bq2 * gmpy2.log(sum(gmpy2.exp(q_i / Bq2) for q_i in q))

        if a > 0:
            return (C2 - C1) * 1.015625
        else:
            return (C1 - C2) * 0.984375
Example #6
def exact_fft(x, debug=False):
    ''' Calculates the exact DFT using gmpy2

    Has ~N^2 efficiency but I tried to optimize the calculation a bit.
    '''
    N = x.shape[0]
    start = time.perf_counter()
    arg = -2j * (gmpy2.const_pi() / N)
    exp1d = np.array([gmpy2.exp(arg * idx) for idx in range(N)])
    exp = np.diag(exp1d)
    for i in range(N):
        for j in range(i+1):
            exp[j, i] = exp[i, j] = exp1d[(j * i) % N]
    stop = time.perf_counter()
    if debug:
        print('preparation %.2fms' % ((stop-start)*1e3))

    start = time.perf_counter()
    x = np.array([gmpy2.mpc(xi) for xi in x])
    # fsum solution - I noticed no difference except that it ran slower
#    y = []
#    for i in range(N):
#        mul = x * exp[i, :]
#        y.append(gmpy2.fsum([m.real for m in mul]) + 1j *
#                 gmpy2.fsum([m.imag for m in mul]))
#    y = np.array(y, dtype=np.complex128)
    y = np.sum(exp * x, axis=-1)
    y = np.array(y, dtype=np.complex128)
    stop = time.perf_counter()
    if debug:
        print('calculation %.2fms' % ((stop-start)*1e3))
    return y
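
A quick sanity check for the routine above, comparing against NumPy's FFT on a small random input (both use the same e^{-2*pi*i*k*n/N} forward convention):

import numpy as np
x = np.random.rand(8)
print(np.max(np.abs(exact_fft(x) - np.fft.fft(x))))   # ~1e-15 with float64 output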
Example #7
 def evaluate_error_at_sample(self, tree):
     """ Sample from the leaf then evaluate tree in the tree's working precision"""
     if tree.left is not None or tree.right is not None:
         if tree.left is not None:
             sample_l, lp_sample_l = self.evaluate_error_at_sample(
                 tree.left)
         if tree.right is not None:
             sample_r, lp_sample_r = self.evaluate_error_at_sample(
                 tree.right)
         if tree.root_name == "+":
             return (sample_l + sample_r), gmpy2.add(
                 mpfr(str(lp_sample_l)), mpfr(str(lp_sample_r)))
         elif tree.root_name == "-":
             return (sample_l - sample_r), gmpy2.sub(
                 mpfr(str(lp_sample_l)), mpfr(str(lp_sample_r)))
         elif tree.root_name == "*":
             return (sample_l * sample_r), gmpy2.mul(
                 mpfr(str(lp_sample_l)), mpfr(str(lp_sample_r)))
         elif tree.root_name == "/":
             return (sample_l / sample_r), gmpy2.div(
                 mpfr(str(lp_sample_l)), mpfr(str(lp_sample_r)))
         elif tree.root_name == "exp":
             return np.exp(sample_l), gmpy2.exp(mpfr(str(sample_l)))
         elif tree.root_name == "sin":
             return np.sin(sample_l), gmpy2.sin(mpfr(str(sample_l)))
         elif tree.root_name == "cos":
             return np.cos(sample_l), gmpy2.cos(mpfr(str(sample_l)))
         elif tree.root_name == "abs":
             return np.abs(sample_l), abs(mpfr(str(sample_l)))
         else:
             print("Operation not supported!")
             exit(-1)
     else:
         sample = tree.root_value[0].getSampleSet(n=1)[0]
         return sample, mpfr(str(sample))
Example #8
def test_interps_random(trials, *range_args):
    log_min, log_max = 1, gmpy2.exp(128)
    exp_min, exp_max = 0, 128

    datastr = ',\n\t\t'.join([
        'avg_abs_mid_50:%E%%', 'min_rel:%E%%', 
        'max_rel:%E%%', 'median_rel:%E%%',
        'max_diff:%E%%', 'min_diff:%E%%'
    ])
    errstr = '\terror in fx_log:\n\t\t'
    errstr += datastr
    errstr += '\n\terror in fx_exp:\n\t\t'
    errstr += datastr

    for i in range(*range_args):
        
        log2_poly = make_fx_poly(optimal_interp(gmpy2.log2, i, 1, 2))
        exp2_poly = make_fx_poly(optimal_interp(gmpy2.exp2, i, 0, 1))

        logf = lambda x: fx_log(x, log2_poly)
        expf = lambda x: fx_exp(x, exp2_poly)
        
        max_log_err = fx_relative_random_error(logf, gmpy2.log, trials, log_min, log_max)
        max_exp_err = fx_relative_random_error(expf, gmpy2.exp, trials, exp_min, exp_max)

        errs = max_log_err + max_exp_err
        print "Relative error using %d Chebyshev nodes:" % i
        print errstr % errs
Example #9
def isEventCausal_LogClock(log1, log2, prime1, prime2, receivedPrimes1, receivedPrimes2):
    set_integral_precision_scratch_space()
    diff = sub(log2, log1)
    if diff < 0:
        return False

    isCausal = False

    exp_value = gmpy2.exp(diff)
    nearest_multiple_of_prime = nearest_multiple(multiply(exp_value, prime1), prime1)

    # tolerance band around the nearest multiple of prime1
    # (note: despite the variable name, the multiplier below is 5e-10, not 10%)
    ten_percent_base_multiplier = multiply(prime1, mpfr('0.0000000005'))

    upper_threshold_value = add(nearest_multiple_of_prime, multiply(nearest_multiple_of_prime, ten_percent_base_multiplier))
    lower_threshold_value = sub(nearest_multiple_of_prime, multiply(nearest_multiple_of_prime, ten_percent_base_multiplier))
    actual_value = multiply(exp_value, prime1)

    isCausal = lower_threshold_value < actual_value < upper_threshold_value


    # if abs(sub(exp_value, rounded_exp_value)    ) < acceptable_difference:
    #     isCausal = True
    # else:
    #     isCausal =   False

    set_logarithmic_precision_persistent()

    return isCausal
Example #10
def metric_griffiths_2004(logliks):
    """
    Calculate metric as in [GriffithsSteyvers2004]_.

    Calculates the harmonic mean of the log-likelihood values `logliks`. Burn-in values
    should already be removed from `logliks`.

    .. [GriffithsSteyvers2004] Thomas L. Griffiths and Mark Steyvers. 2004. Finding scientific topics. Proceedings of
                               the National Academy of Sciences 101, suppl 1: 5228–5235.
                               http://doi.org/10.1073/pnas.0307752101

    .. note:: Requires `gmpy2 <https://github.com/aleaxit/gmpy>`_ package for multiple-precision arithmetic to avoid
              numerical underflow.

    :param logliks: array with log-likelihood values
    :return: calculated metric
    """

    import gmpy2

    # using median trick as in Martin Ponweiser's Diploma Thesis 2012, p.36
    ll_med = np.median(logliks)
    ps = [gmpy2.exp(ll_med - x) for x in logliks]
    ps_mean = gmpy2.mpfr(0)
    for p in ps:
        ps_mean += p / len(ps)
    return float(ll_med - gmpy2.log(ps_mean)
                 )  # after taking the log() we can use a Python float() again
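
The median shift is what keeps this computable: with log-likelihoods around -1050, a plain float64 exp() would underflow to zero, while exp(ll_med - x) stays of order one. A toy run with illustrative values:

import numpy as np
logliks = np.array([-1052.3, -1049.8, -1050.4, -1051.1])
print(metric_griffiths_2004(logliks))   # finite harmonic-mean estimate, ~ -1051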
Example #11
def main():
    start = time.time()
    results = multiprocessing.Queue()
    updates = multiprocessing.Queue()
    trials = 200
    max_input = int(gmpy2.exp(128))
    degree = int(sys.argv[2])
    num_points = int(sys.argv[1])
    n = multiprocessing.cpu_count()
    processes = []
    proc_results = []
    size = int(gmpy2.comb(num_points, degree)) / n
    combs = itertools.combinations(numpy.linspace(1, 2, num_points), degree)
    for i in range(n):
        xs = [x for _, x in zip(range(size), combs)]
        proc = multiprocessing.Process(target=worker, args=(xs, results, updates, trials, max_input))
        print "starting process %d" % i
        proc.start()
        processes.append(proc)
    for i in range(2*n):
        print updates.get()
    for i in range(n):
        proc_results.append(results.get())
    for proc in processes:
        proc.join()
    pretty_errors(min(proc_results))
    print "Time Elapsed: %.2f seconds." % (time.time() - start)
Example #12
def test_interps_random(trials, *range_args):
    log_min, log_max = 1, gmpy2.exp(128)
    exp_min, exp_max = 0, 128

    datastr = ',\n\t\t'.join([
        'avg_abs_mid_50:%E%%', 'min_rel:%E%%', 
        'max_rel:%E%%', 'mode_rel:%E%%',
        'max_diff:%E%%', 'min_diff:%E%%'
    ])
    errstr = '\terror in fx_log:\n\t\t'
    errstr += datastr
    errstr += '\n\terror in fx_exp:\n\t\t'
    errstr += datastr

    for i in range(*range_args):
        
        log2_poly = make_fx_poly(optimal_interp(gmpy2.log2, i, 1, 2))
        exp2_poly = make_fx_poly(optimal_interp(gmpy2.exp2, i, 0, 1))

        logf = lambda x: fx_log(x, log2_poly)
        expf = lambda x: fx_exp(x, exp2_poly)
        
        max_log_err = fx_relative_random_error(logf, gmpy2.log, trials, log_min, log_max)
        max_exp_err = fx_relative_random_error(expf, gmpy2.exp, trials, exp_min, exp_max)

        errs = max_log_err + max_exp_err
        print "Relative error using %d Chebyshev nodes:" % i
        print errstr % errs
Example #13
    def errbound(x):
        # absolute value of the imaginary component
        t = abs(complex(x).imag)

        # calculate error term
        et = (3 / (3 + sqrt(8))**n) * (
            (1 + 2 * t) * exp(t * pi / 2)) / (1 - 2**(1 - x))
        return abs(et)
Example #14
def exponential_mechanism_big(data, domain, quality_function, eps, bulk=False, for_sparse=False):
    """Exponential Mechanism that can deal with very large or very small qualities
    exponential_mechanism ( data , domain , quality function , privacy parameter )
    :param data: list or array of values
    :param domain: list of possible results
    :param quality_function: function which get as input the data and a domain element and 'qualifies' it
    :param eps: privacy parameter
    :param bulk: in case that we can reduce run-time by evaluating the quality of the whole domain in bulk,
    the procedure will be given a 'bulk' quality function. meaning that instead of one domain element the
    quality function get the whole domain as input
    :param for_sparse: in cases that the domain is a very spared one, namely a big percent of the domain has quality 0,
    there is a special procedure called sparse_domain. That procedure needs, beside that result from the given
    mechanism, the total weight of the domain whose quality is more than 0. If that is the case Exponential-Mechanism
    will return also the P DF before the normalization.
    :return: an element of domain with approximately maximum value of quality function
    """

    # calculate a list of probabilities for each element in the domain D
    # probability of element d in domain proportional to exp(eps*quality(data,d)/2)
    if bulk:
        qualified_domain = quality_function(data, domain)
        domain_pdf = [gmpy2.exp(eps * q / 2) for q in qualified_domain]
    else:
        domain_pdf = [gmpy2.exp(eps * quality_function(data, d) / 2) for d in domain]
    total_value = sum(domain_pdf)
    domain_pdf = [d / total_value for d in domain_pdf]
    normalizer = sum(domain_pdf)
    # for debugging and other reasons: check that domain_pdf indeed defines a distribution
    # use the uniform distribution (from 0 to 1) to pick an element by the CDF
    if abs(normalizer - 1) > 0.001:
        raise ValueError('ERR: exponential_mechanism, sum(domain_pdf) != 1.')

    # accumulate elements to get the CDF of the exponential distribution
    domain_cdf = np.cumsum(domain_pdf).tolist()
    # pick a uniformly random value on the CDF
    pick = np.random.uniform()

    # return the element corresponding to the pick;
    # take the min of the index and len(D)-1 to avoid an out-of-bounds index
    result = domain[min(np.searchsorted(domain_cdf, pick), len(domain)-1)]
    # in exponential_mechanism_sparse we need also the total_sum value
    if for_sparse:
        return result, total_value
    return result
Example #15
def p_line(k, L, n, u):
    summary = 0.0
    tmin = k - 1
    tmax = L - (n - k) - u
    static = to_sum(L - n + 1, L)
    static1 = to_sum(n - k, n)
    static2 = ln_factorial(k - 1)
    for t in xrange(tmin, tmax + 1):
        summary += exp(variant(t, u, k, n, L) - static + static1 - static2)
        print t, "out of", tmax
    return summary
Example #16
def my_gamma(x, a, g):
    n = len(a)
    x = mpfr(x)

    # calculate the sum of partial fractions: L_g(x) = a[0] + sum(a[i] / (x + i))
    ret = a[0]
    for i in range(1, n):
        ret += a[i] / (x + i)

    # temporary variable
    tmp = x + g + 0.5
    return sqrt(2 * pi) * tmp**(x + 0.5) * exp(-tmp) * ret
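
A hedged cross-check for the routine above, assuming the coefficients come from a Lanczos generator such as get_gamma_table in Example #24 (with a matching g); in the usual Lanczos convention this approximates Gamma(x + 1):

# a, _ = get_gamma_table(9, 7)         # hypothetical: 9 terms, g = 7
# print(my_gamma(mpfr('4.5'), a, 7))   # should be close to gamma(mpfr('5.5'))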
Example #17
def graph_errors(*range_args):
    exp_min, exp_max = 0, 4
    exp_xs = map(gmpy2.mpfr, linspace(exp_min, exp_max, 10000))
    exp_ys = map(gmpy2.exp, exp_xs)

    log_min, log_max = 1, gmpy2.exp(exp_max)
    log_xs = map(gmpy2.mpfr, linspace(log_min, log_max, 10000))
    log_ys = map(gmpy2.log, log_xs)

    funcs = [
        (fx_exp, gmpy2.exp2, 0, 1, exp_xs, exp_ys, 'exp'),
        (fx_log, gmpy2.log2, 1, 2, log_xs, log_ys, 'log'),
    ]

    for i in range(*range_args):
        for func_items in funcs:
            fx_func, interp_func = func_items[:2]
            interp_min, interp_max = func_items[2:4]
            ref_xs, ref_ys = func_items[4:6]
            name = func_items[6]
            p_i = make_fx_poly(
                optimal_interp(
                    interp_func,
                    i,
                    interp_min,
                    interp_max
                )
            )
            fx_f = lambda x: fx_func(int(x * 2**64), p_i)/gmpy2.mpfr(1 << 64)
            fx_ys = map(fx_f, ref_xs)
            first_diff = map(lambda a, b: b - a, fx_ys[:-1], fx_ys[1:])
            fig, axes = plt.subplots(3, sharex=True)
            axes[0].set_title('$\\%s(x)$ and $\\%s_{fx}(x)$' % (name, name))
            axes[0].plot(ref_xs, ref_ys, label=('$\\%s$' % name))
            axes[0].plot(ref_xs, fx_ys, label=('$\\%s_{fx}$' % name))
            axes[1].set_title('$(\\%s_{fx} - \\%s)(x)$' % (name, name))
            axes[1].plot(ref_xs, map(lambda a, b: a-b, fx_ys, ref_ys))
            axes[2].set_title('$\\frac{d}{dx}(\\%s_{fx})$' % name)
            axes[2].plot(ref_xs[:-1], first_diff)
            fig.savefig('chebyshev-%s-%d.png'%(name, i))

            if any(map(lambda a: 1 if a < 0 else 0, first_diff)):
                print "\033[1;31mBAD FIRST DIFF!!!!! fx_%s with %d nodes\033[0m" % (name, i)
Example #19
def fourier(func, dt, freq_max):

    g = func
    N = len(g)
    Wf = N

    dt = mpfr(dt)
    t = [dt * n for n in range(N)]

    dw = mpfr(freq_max) / (N - 1)
    f = [dw * n for n in range(N)]

    G = [0] * Wf

    for w in range(Wf):

        G[w] = sum(g[n] * exp(2 * I * pi * f[w] * t[n]) * dt for n in range(N))

    return f, G
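
The fragment above assumes mpfr, exp, pi, and an imaginary unit I are already in scope; a minimal setup and call might look like this (signal values are illustrative):

from gmpy2 import mpfr, mpc, exp, const_pi
pi = const_pi()
I = mpc(0, 1)                                   # imaginary unit as an mpc
f, G = fourier([mpfr(v) for v in (0, 1, 0, -1)], 0.25, 2.0)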
Example #20
def p_line(k, L, n, u):
    t1 = clock()
    summary = 0.0
    tmin = k - 1
    tmax = L - (n - k) - u
    st = to_sum(L - n + 1, L)
    st1 = to_sum(n - k, n)
    st2 = ln_factorial(k - 1)
    stat = exp(st1 - st - st2)

    f = np.vectorize(variant, otypes=[float])
    exp1 = np.vectorize(my_exp)
    summary = np.sum(exp1(f(np.arange(tmin, tmax+1),u,k,n,L)))

    # for t in xrange(tmin, tmax + 1):
    #     summary += exp(variant(t, u, k, n, L) - static + static1 - static2)
    print ("P_line time:", clock()-t1)

    return summary*stat
Example #21
def graph_rb(graph, label):
    eigen_vals = nx.linalg.spectrum.adjacency_spectrum(graph, weight='weight')
    max_val = 0
    ret_val = mpfr('0')

    for vals in eigen_vals:
        if vals > max_val:
            max_val = vals
    print(max_val)
    eigen_vals = np.array(eigen_vals, dtype=np.float128)
    print(eigen_vals)
    
    for i in range(len(eigen_vals)):
        ret_val += gmpy2.exp(mpfr(str(eigen_vals[i])))
    ret_val /= 500
    ret_val = gmpy2.log(ret_val)

    print(label,str(np.real(ret_val)))
    
    return np.real(ret_val)
Example #22
def inv_fourier(transf, dt, freq_max):

    G = transf
    N = len(G)
    Wf = N

    dt = mpfr(dt)
    t = [dt * n for n in range(N)]

    dw = mpfr(freq_max) / (N - 1)
    f = [dw * n for n in range(N)]

    g = [0] * N

    for n in range(N):

        g[n] = sum(G[w] * exp(-2 * I * pi * f[w] * t[n]) * dw
                   for w in range(Wf))

    return backf(map(lambda x: x.real, g))
Example #23
 def term(x, y):
     dot = ß.dot(x)
     if y:
         return dot - log(1 + exp(dot))
     else:
         return -log(1 + exp(dot))
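
A sketch of the context this closure needs: ß as a coefficient vector, with exp and log taken from gmpy2 so that log(1 + exp(dot)) stays finite even for large |dot| (the coefficients below are hypothetical):

import numpy as np
from gmpy2 import exp, log
ß = np.array([0.5, -2.0])
print(term(np.array([1.0, 3.0]), 1))   # log-likelihood term for a positive label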
Example #24
def get_gamma_table(n, g):

    # we need to generate the array of coefficients `a` such that:
    # Gamma(x) = (x + g + 0.5)^(x+0.5) / (e^(x+g-0.5)) * L_g(x)
    # L_g(x) = a[0] + sum(a[k] / (z + k) for k in range(1, N))

    # essentially, we construct some matrices from number-theoretic functions
    #   and we can generate the coefficients of the partial fraction terms `1 / (z + k)`
    # This greatly simplifies from the native `z(z-1).../((z+1)(z+2)...)` form, which
    #   would require a lot more operations to implement internally (see definition of Ag(z) on wikipedia)

    # calculate an element for the 'B' matrix (n x n)
    def getB(i, j):
        if i == 0:
            return 1
        elif i > 0 and j >= i:
            return (-1)**(j - i) * choose(i + j - 1, j - i)
        else:
            return 0

    # calculate an element for the 'C' matrix (n x n)
    def getC(i, j):
        if i == j and i == 0:
            return mpfr(0.5)
        elif j > i:
            return 0
        else:
            # this is the closed form instead of calculating a sum via the 'S' symbol mentioned in some places
            return int((-1)**(i - j) * 4**j * i * factorial(i + j - 1) /
                       (factorial(i - j) * factorial(2 * j)))

    # calculate an element for the 'Dc' matrix (n x n)
    @lru_cache
    def getDc(i, j):
        if i != j:
            # it's a diagonal matrix, so return 0 for all non-diagonal elements
            return 0
        else:
            # otherwise, compute via the formula given
            return 2 * double_factorial(2 * i - 1)

    # calculate an element for the 'Dr' matrix (n x n)
    @lru_cache
    def getDr(i, j):
        # it's diagonal, so filter out non-diagonal entries
        if i != j:
            return 0
        elif i == 0:
            return 1
        else:
            # guaranteed to be an integer, so cast it (so no precision is lost)
            return -int(
                factorial(2 * i) / (2 * factorial(i) * factorial(i - 1)))

    # generate matrices from their generator functions as 2D lists
    # NOTE: this obviously isn't very efficient, but it allows arbitrary precision elements,
    #   which numpy does not
    # these matrices are size <100, so it won't be that bad anyway
    B = [[getB(i, j) for j in range(n)] for i in range(n)]
    C = [[getC(i, j) for j in range(n)] for i in range(n)]
    Dc = [[getDc(i, j) for j in range(n)] for i in range(n)]
    Dr = [[getDr(i, j) for j in range(n)] for i in range(n)]

    # the `f` vector, defined as `F` but without the double rising factorial (which Dc has)
    # I left this in here instead of combining it, to stay more accurate to
    #   the method given in [4]
    f_gn = [sqrt(2) * (e / (2 * (i + g) + 1))**(i + 0.5) for i in range(n)]

    # multiply matrices X*Y*...
    def matmul(X, Y, *args):
        if args:
            return matmul(matmul(X, Y), *args)
        else:
            # nonrecursive
            assert len(X[0]) == len(Y)
            M, N, K = len(X), len(Y[0]), len(Y)

            # GEMM kernel (very inefficient; but doesn't matter due to AP floats & small matrix sizes)
            return [[
                sum(X[i][k] * Y[k][j] for k in range(K)) for j in range(N)
            ] for i in range(M)]

    # normalization factor; we multiply everything by this so it is `pretty close` to 1.0
    W = exp(g) / sqrt(2 * pi)

    # get the resulting coefficients
    # NOTE: we should get a column vector back, so return the 0th element of each row to get the coefficients
    a = list(
        map(lambda x: W * x[0],
            matmul(Dr, B, C, Dc, [[f_gn[i]] for i in range(n)])))

    # compute 'p' coefficients (only needed for the error bound function)
    p = [
        sum([getC(2 * j, 2 * j) * f_gn[j] * Dc[j][j] for j in range(i)])
        for i in range(n)
    ]

    # error bound; does not depend on 'x'
    def errbound():
        # given: err <= |pi/2*W*( sqrt(pi) - u*a )|
        # compute dot product `u * a`
        dot_ua = (a[0] + sum([2 * a[i] / mpfr(2 * i - 1)
                              for i in range(1, n)])) / W
        # compute the full formula
        return abs(pi / 2 * W) * abs(sqrt(pi) - dot_ua)

    # return them
    return a, errbound
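
A minimal smoke test for the generator, assuming the module-level helpers it uses (choose, factorial, double_factorial, pi, e, lru_cache, ...) are in scope as in Example #34; the argument values are illustrative:

a, errbound = get_gamma_table(9, 7)   # 9 partial-fraction terms, g = 7
print(len(a), float(errbound()))      # 9 coefficients plus the global error bound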
Example #25
def metric_held_out_documents_wallach09(dtm_test,
                                        theta_test,
                                        phi_train,
                                        alpha,
                                        n_samples=10000):
    """
    Estimation of the probability of held-out documents according to [Wallach2009]_ using a
    document-topic estimation `theta_test` that was estimated via held-out documents `dtm_test` on a trained model with
    a topic-word distribution `phi_train` and a document-topic prior `alpha`. Draw `n_samples` according to `theta_test`
    for each document in `dtm_test` (memory consumption and run time can be very high for larger `n_samples` and
    a large amount of big documents in `dtm_test`).

    A document-topic estimation `theta_test` can be obtained from a trained model from the "lda" package or scikit-learn
    package with the `transform()` method.

    Adopted MATLAB code `originally from Ian Murray, 2009 <https://people.cs.umass.edu/~wallach/code/etm/>`_ and
    downloaded from `umass.edu <https://people.cs.umass.edu/~wallach/code/etm/lda_eval_matlab_code_20120930.tar.gz>`_.

    .. note:: Requires `gmpy2 <https://github.com/aleaxit/gmpy>`_ package for multiple-precision arithmetic to avoid
              numerical underflow.

    .. [Wallach2009] Wallach, H.M., Murray, I., Salakhutdinov, R. and Mimno, D., 2009. Evaluation methods for
                     topic models.

    :param dtm_test: held-out documents of shape NxM with N documents and vocabulary size M
    :param theta_test: document-topic estimation of `dtm_test`; shape NxK with K topics
    :param phi_train: topic-word distribution of a trained topic model that should be evaluated; shape KxM
    :param alpha: document-topic prior of the trained topic model that should be evaluated; either a scalar or an array
                  of length K
    :return: estimated probability of held-out documents
    """
    import gmpy2

    n_test_docs, n_vocab = dtm_test.shape

    if n_test_docs != theta_test.shape[0]:
        raise ValueError(
            'shapes of `dtm_test` and `theta_test` do not match (unequal number of documents)'
        )

    _, n_topics = theta_test.shape

    if n_topics != phi_train.shape[0]:
        raise ValueError(
            'shapes of `theta_test` and `phi_train` do not match (unequal number of topics)'
        )

    if n_vocab != phi_train.shape[1]:
        raise ValueError(
            'shapes of `dtm_test` and `phi_train` do not match (unequal size of vocabulary)'
        )

    if isinstance(alpha, np.ndarray):
        alpha_sum = np.sum(alpha)
    else:
        alpha_sum = alpha * n_topics
        alpha = np.repeat(alpha, n_topics)

    if alpha.shape != (n_topics, ):
        raise ValueError(
            '`alpha` has invalid shape (should be vector of length n_topics)')

    # samples: random topic assignments for each document
    #          shape: n_test_docs x n_samples
    #          values in [0, n_topics) ~ theta_test
    samples = np.array([
        np.random.choice(n_topics, n_samples, p=theta_test[d, :])
        for d in range(n_test_docs)
    ])
    assert samples.shape == (n_test_docs, n_samples)
    assert 0 <= samples.min() < n_topics
    assert 0 <= samples.max() < n_topics

    # n_k: number of documents per topic and sample
    #      shape: n_topics x n_samples
    #      values in [0, n_test_docs]
    n_k = np.array([np.sum(samples == t, axis=0) for t in range(n_topics)])
    assert n_k.shape == (n_topics, n_samples)
    assert 0 <= n_k.min() <= n_test_docs
    assert 0 <= n_k.max() <= n_test_docs

    # calculate log p(z) for each sample
    # shape: 1 x n_samples
    log_p_z = np.sum(gammaln(n_k + alpha[:, np.newaxis]), axis=0) + gammaln(alpha_sum) \
              - np.sum(gammaln(alpha)) - gammaln(n_test_docs + alpha_sum)

    assert log_p_z.shape == (n_samples, )

    # calculate log p(w|z) for each sample
    # shape: 1 x n_samples

    log_p_w_given_z = np.zeros(n_samples)

    dtm_is_sparse = issparse(dtm_test)
    for d in range(n_test_docs):
        if dtm_is_sparse:
            word_counts_d = dtm_test[d].toarray().flatten()
        else:
            word_counts_d = dtm_test[d]
        words = np.repeat(np.arange(n_vocab), word_counts_d)
        assert words.shape == (word_counts_d.sum(), )

        phi_topics_d = phi_train[
            samples[d]]  # phi for topics in samples for document d
        log_p_w_given_z += np.sum(np.log(phi_topics_d[:, words]), axis=1)

    log_joint = log_p_z + log_p_w_given_z

    # calculate log theta_test
    # shape: 1 x n_samples

    log_theta_test = np.zeros(n_samples)

    for d in range(n_test_docs):
        log_theta_test += np.log(theta_test[d, samples[d]])

    # compare
    log_weights = log_joint - log_theta_test

    # calculate final log evidence
    # requires using gmpy2 to avoid numerical underflow
    exp_sum = gmpy2.mpfr(0)
    for exp in (gmpy2.exp(x) for x in log_weights):
        exp_sum += exp

    return float(gmpy2.log(exp_sum)) - np.log(n_samples)
Example #26
    def _sigmoid(self, x):
        mpfrExp = gmpy2.exp(-x)

        return float(1.0 / (1.0 + mpfrExp))
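
A quick illustration of why the mpfr exponential is used here: for large negative x, math.exp(-x) overflows a float, while gmpy2.exp returns a finite (if huge) mpfr:

import gmpy2
x = -1000.0
# math.exp(-x) raises OverflowError; the mpfr version just works
print(float(1.0 / (1.0 + gmpy2.exp(gmpy2.mpfr(-x)))))   # ~0.0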
Example #27
def generate_serpent(*range_args):

    exp_code = '''\
macro fx_exp2_small($x):
    with $result = %s0x{poly[0]:X}:
        with $temp = $x:
            {interp_code}

macro fx_exp2($x):
    with $y = $x / 0x{FX_ONE:X}:
        with $z = $x %% 0x{FX_ONE:X}:
            fx_exp2_small($z) * 2**$y

macro fx_exp($x):
    fx_exp2($x * 0x{FX_ONE:X} / 0x{FX_LN2:X})

# Calculates the exponential function given a fixed point [base 10^18] number, so e^x
def fx_exp(x):
    return(fx_exp(x))
'''

    log_code = '''
macro fx_floor_log2($x):
    with $y = $x / 0x{FX_ONE:X}:
        with $lo = 0:
            with $hi = {MAX_POWER}:
                with $mid = ($hi + $lo)/2:
                    while (($lo + 1) != $hi):
                        if $y < 2**$mid:
                            $hi = $mid
                        else:
                            $lo = $mid
                        $mid = ($hi + $lo)/2
                    $lo

macro fx_log2_small($x):
    with $result = %s0x{poly[0]:X}:
        with $temp = $x:
            {interp_code}

macro fx_log2($x):
    with $y = fx_floor_log2($x):
        with $z = $x / 2**$y:
            $y * 0x{FX_ONE:X} + fx_log2_small($z)

macro fx_log($x):
    fx_log2($x) * 0x{FX_ONE:X} / 0x{FX_LOG2E:X}

# Calculates the natural log function given a fixed point [base 10^18] number, so ln(x)
def fx_log(x):
    return(fx_log(x))
'''

    code_items = [
        (exp_code, gmpy2.exp2, 0, 1),
        (log_code, gmpy2.log2, 1, 2),
    ]

    tab = ' '*12
    for i in range(*range_args):
        full_code = ''
        for code, ref_func, a, b in code_items:
            poly = make_fx_poly(optimal_interp(ref_func, i, a, b))
            interp_code = ''
            for j, a_j in enumerate(poly[1:-1]):
                piece = '$result %%s= 0x{poly[%d]:X}*$temp / 0x{FX_ONE:X}' % (j + 1)
                if a_j > 0:
                    interp_code += piece % '+'
                else:
                    interp_code += piece % '-'
                interp_code += '\n' + tab
                interp_code += '$temp = $temp*$x / 0x{FX_ONE:X}'
                interp_code += '\n' + tab
            if poly[0] > 0:
                this_code = code % '+'
            else:
                this_code = code % '-'
            if poly[-1] > 0:
                interp_code += '$result + 0x{poly[%d]:X}*$temp / 0x{FX_ONE:X}' % (len(poly) - 1)
            else:
                interp_code += '$result - 0x{poly[%d]:X}*$temp / 0x{FX_ONE:X}' % (len(poly) - 1)
            poly = map(abs, poly)
            fmt_args = globals().copy()
            fmt_args.update(locals())
            this_code = this_code.format(**fmt_args).format(**fmt_args)
            full_code += this_code
        full_code = full_code.replace('+0x', '0x')

        with open('fx_macros_%d.se'%i, 'w') as f:
            f.write(full_code)

        c = s.abi_contract(full_code)

        trials = 100

        log_min, log_max = 1, gmpy2.exp(128)
        exp_min, exp_max = 0, 128

        datastr = ',\n\t\t'.join([
            'avg_abs_mid_50:%E%%', 'min_rel:%E%%', 
            'max_rel:%E%%', 'mode_rel:%E%%',
            'max_diff:%E%%', 'min_diff:%E%%'
        ])
        errstr = '\terror in fx_log:\n\t\t'
        errstr += datastr
        errstr += '\n\terror in fx_exp:\n\t\t'
        errstr += datastr
        
        max_log_err = fx_relative_random_error(c.fx_log, gmpy2.log, trials, log_min, log_max)
        max_exp_err = fx_relative_random_error(c.fx_exp, gmpy2.exp, trials, exp_min, exp_max)

        errs = max_log_err + max_exp_err
        print "Relative error using %d Chebyshev nodes:" % i
        print errstr % errs
Example #28
 def P(x):
     dot = ß.dot(x)
     return exp(dot)/(1 + exp(dot))
Example #29
def antilog(a):
    set_integral_precision_scratch_space()
    result = gmpy2.exp(a)
    set_logarithmic_precision_persistent()
    return result
Example #30
        if (randint(0, 1) == 1):
            initializer = "-" + initializer

        return Numeric(initializer)

    def eval(self):
        return self

    def getexponent(self):
        return int(gmpy2.floor(self.log().val))


PI = Numeric(gmpy2.const_pi())
PI.latex = lambda: '\pi'
E = Numeric(gmpy2.exp(1))


class Model1(Expression):
    def __init__(self):
        self.value = Numeric.new()
        self.text = str(self.value)

    def gen(self):
        x = randint(1, 550)
        if (x < 120):
            z = Numeric.create(self.value.getexponent() - 1,
                               self.value.getexponent() + 1)
            self.text = z.latex() + " + " + self.value.latex()
            self.value = self.value + z
        elif (x < 240):
Example #31
 def exp(self):
     return Numeric(gmpy2.exp(self.val))
Example #32
    a = mp(lower_lim)
    b = mp(upper_lim)
    Ni = num_intervals

    dx = (b - a) / Ni

    Intg = dx / 2 * (f(a) + 2 * sum(f(a + i * dx)
                                    for i in range(1, Ni)) + f(b))

    return backf(Intg, decimals)


# ------------------------------------------------
### TESTS ###

f = lambda x: [exp(xi) for xi in x] if isinstance(x, list) else exp(x)

a = 0
b = 0.1

Ni = 5

# ------------------------------------------------
### COMPARISON ###

trueVal = backf(f(b) - f(a), 8)
approx = integral(f, a, b, Ni)

results = pd.DataFrame(
    columns='Approximation AnalyticValue Error RelativeError PercentageError'.
    split())
Example #33
def compute_timings():
    # operations = [bf.add, bf.mul, bf.sub, bf.div]
    operations = [gp.add, gp.mul, gp.sub, gp.div]
    labels = ["add", "mul", "sub", "div"]

    for label, operation in zip(labels, operations):
        precisions, times = [], []
        for i in range(3, 45):
            p = int(1.3**i)
            precisions.append(p)
            gp.set_context(gp.context(precision=p))
            start = time.time()
            pi = gp.const_pi()
            e = gp.exp(1)
            a = operation(e, pi)
            # pi = bf.const_pi(context)
            # e = bf.exp(1, context)
            # a = operation(e, pi, context)
            # ... the three commented-out lines above are repeated roughly a
            # hundred times in the original source (manual unrolling of the
            # bf-based timing body); the repetitions are elided here
            end = time.time()
            times.append(end-start)
        plt.plot(precisions, times, label=label)
        plt.legend()
        plt.xlabel('Precision')
        plt.ylabel('Time')
    plt.show()
Example #34
    '--prefix',
    help='Prefix to the C style functions to generate (include the "_"!)',
    default="my_")
parser.add_argument(
    '--lgamma',
    help='Whether or not to include an implementation of the `lgamma` function',
    action='store_true')

args = parser.parse_args()

# set precision to the requested one
gmpy2.get_context().precision = args.prec

# pi & e, but to full precision within gmpy2
pi = const_pi()
e = exp(1)


# factorial, product of all integers <= x
@lru_cache
def factorial(x):
    return math.factorial(x)


# double factorial, if n is even, product of all even numbers <= n, otherwise the product of all odd numbers <= n
@lru_cache
def double_factorial(n):
    if n <= 1:
        return 1
    else:
        return n * double_factorial(n - 2)
            initializer = "-" + initializer

        return Numeric(initializer)


    def eval(self):
        return self

    def getexponent(self):
        return int(gmpy2.floor(self.log().val))



PI = Numeric(gmpy2.const_pi())
PI.latex = lambda: '\pi'
E = Numeric(gmpy2.exp(1))

class Model1(Expression):
    def __init__(self):
        self.value = Numeric.new()
        self.text = str(self.value)

    def gen(self):
        x = randint(1, 550)
        if (x < 120):
            z = Numeric.create(self.value.getexponent() - 1, self.value.getexponent() + 1)
            self.text = z.latex() + " + " + self.value.latex()
            self.value = self.value + z
        elif (x < 240):
            z = Numeric.create(self.value.getexponent() - 1, self.value.getexponent() + 1)
            self.text = z.latex() + " - " + self.latex()
Example #36
def antilog(a):
    return gmpy2.exp(a)
Example #37
def sigmoid(x, t_1, t_2):
    z = t_1 * x + t_2
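    # note: exp(-z) / (1 + exp(-z)) equals 1 / (1 + exp(z)), i.e. sigma(-z)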
    return mpfr(gmpy2.exp(-z) / (1.0 + gmpy2.exp(-z)))
Example #38
def comparePrimeAndLog(number, log):
    gmpy2.get_context().precision = 80000
    return abs(number - gmpy2.exp(log)) < acceptable_difference
Example #39
def main():
    # Set/initialize parameters
    sf = 7  # Spreading factor
    n_fft = 2**sf  # Corresponding FFT size
    n_hamming = 7  # Hamming codeword length
    npl = 32  # payload length in LoRa symbols
    snr_start = -1 * (2 * sf + 1) - 4  # low-bound of SNR range
    snr_end = snr_start + 30  # upper-bound of SNR range
    p_error_swer = []  # list containing the sync word error rate
    p_error_fer = []  # list containing the frame error rate
    p_error_her = []  # list containing the header error rate

    print(snr_start, snr_end)

    for snr in range(snr_start, snr_end + 1):
        # print(snr)
        sig2 = mpfr(10**(-snr / 10.0))  # Noise variance
        # snr_lin = mpfr(1*10**(snr/10.0))
        error_swer0 = mpfr(0.0)  # Initialise error
        # error_nochan = mpfr(0.0) # Initialise error
        for k in range(1, n_fft):
            nchoosek = mpfr(comb(n_fft - 1, k, exact=True))
            #################################################
            # Sync word Error Rate over AWGN Channel
            #################################################
            error_swer0 = error_swer0 - mpfr(nchoosek * (-1)**k / (k+1)) \
            * mpfr(gmpy2.exp(-k*n_fft/((k+1)*sig2)))
            #################################################
            # Symbol Error Rate over Rayleigh Channel
            #################################################
            # error_swer0 = error_swer0 - mpfr(nchoosek * (-1)**k*sig2 /
            #                      ((k+1)*sig2 + k*n_fft*1))
            # print(nchoosek)
        error_swer0 = mpfr(error_swer0,
                           32)  # Limit precision for printing/saving
        p_error_swer.append(
            1 - (1 - float(error_swer0))**2)  # sync word error rate

        pb = (2**(sf - 1)) / (2**sf - 1) * mpfr(error_swer0,
                                                32)  # bit error rate
        pcw = 1 - (1 - pb)**n_hamming - n_hamming * pb * (1 - pb)**(
            n_hamming - 1)  # codeword error rate
        pcw_header = 1 - (1 - pb)**8 - 8 * pb * (1 - pb)**(
            8 - 1)  # codeword error rate for header
        error_fer = 1 - (1 - pcw)**(npl * sf / n_hamming)  # payload error rate
        error_her = 1 - (1 - pcw_header)**(sf)  # header error rate
        p_error_fer.append(float(error_fer))
        p_error_her.append(float(error_her))
    print(p_error_swer)
    print(p_error_fer)
    print(p_error_her)
    file = open(
        "swer_sf" + str(sf) + "_h" + str(n_hamming) + "_npl" + str(npl) +
        ".txt", "w")
    file.write(json.dumps(p_error_swer))
    file.close()
    file = open(
        "fer_sf" + str(sf) + "_h" + str(n_hamming) + "_npl" + str(npl) +
        ".txt", "w")
    file.write(json.dumps(p_error_fer))
    file.close()
    file = open(
        "her_sf" + str(sf) + "_h" + str(n_hamming) + "_npl" + str(npl) +
        ".txt", "w")
    file.write(json.dumps(p_error_her))
    file.close()
Example #40
#!/usr/bin/python2
import sys
import multiprocessing
import gmpy2
import scipy
import scipy.interpolate
import numpy
import random
import itertools
import time

gmpy2.get_context().precision = 256

LOG2E = int(gmpy2.log2(gmpy2.exp(1))*2**64)

def fxp_ilog2(x):
    # floor(log2(.)) of the integer part of a 192.64 fixed-point value
    y = x >> 64
    lo = 0
    hi = 191
    while lo < hi:
        mid = (lo + hi + 1) >> 1
        if (1 << mid) > y:
            hi = mid - 1
        else:
            lo = mid
    return lo

def fxp_lagrange(x, coeffs):
    result = 0
    xpow = 1 << 64
Example #41
#import matplotlib.pyplot as plt
import os
import bisect
from numpy import linspace
# gmpy2 precision initialization
BITS = (1 << 10)
BYTES = BITS/8
gmpy2.get_context().precision = BITS # a whole lotta bits

def random():
    seed = int(os.urandom(BYTES).encode('hex'), 16)
    return gmpy2.mpfr_random(gmpy2.random_state(seed))

# Useful constants as mpfr
PI = gmpy2.acos(-1)
LOG2E = gmpy2.log2(gmpy2.exp(1))
LN2 = gmpy2.log(2)
# Same, as 192.64 fixedpoint
FX_PI = int(PI * 2**64)
FX_LOG2E = int(LOG2E * 2**64)
FX_LN2 = int(LN2 * 2**64)
FX_ONE = 1 << 64
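
# A quick round-trip check of the 192.64 representation above (illustrative):
#   x = gmpy2.mpfr('2.5')
#   fx = int(x * FX_ONE)                 # to 192.64 fixed point
#   assert gmpy2.mpfr(fx) / FX_ONE == x  # back to mpfr, exactly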
## The index of a poly is the power of x,
## the val at the index is the coefficient.
##
## An nth degree poly is a list of len n + 1.
##
## The vals in a poly must all be floating point
## numbers.

def poly_add(p1, p2):
Example #45
def get_b_smooth():
    t = gmpy2.sqrt(gmpy2.mul(gmpy2.log(N), gmpy2.log(gmpy2.log(N))))
    b = gmpy2.exp(gmpy2.mul(0.5, t))
    print("bsmooth advice: " + str(b))