# ---- Example 1 ----
def coefs_QMCSobol_wrapper(nb_samples, a_nu, dim=6):
    """QMC (Sobol sequence) estimate of the coefficient integral for a_nu.

    Averages ctaa(sample, 1.0, 1.0/dim, 5.) * mvc(sample, a_nu) over
    nb_samples consecutive points of a dim-dimensional Sobol sequence.

    nb_samples -- number of Sobol points (must be > 0; we divide by it).
    a_nu       -- multi-index identifying the coefficient being estimated.
    dim        -- dimension of the parameter space (default 6, the value
                  that was previously hard-coded).

    NOTE: a lot of calculations are repeated across calls; caching the
    per-coordinate Chebyshev evaluations (cf. faster_QMC_computations)
    would be smarter.
    """
    int_val = 0.0
    seed = 0  # i4_sobol returns the next seed; threading it steps the sequence
    for _ in xrange(nb_samples):
        # Fix: the original defined `dim` but passed the literal 6 anyway.
        sample, seed = i4_sobol(dim, seed)
        int_val += ctaa(sample, 1.0, 1.0 / dim, 5.) * mvc(sample, a_nu)
    return int_val / nb_samples
# ---- Example 2 ----
def faster_QMC_computations(nb_samples, nus):
    """QMC estimate that caches per-coordinate Chebyshev evaluations.

    Strategy: find the highest degree appearing in the list of nu's, then
    walk the Sobol samples one at a time; every time a new coordinate value
    is seen, store T_j(value) for j = 0..max_degree (and the Chebyshev
    weight) in dictionaries keyed by the value, so repeated values are
    evaluated only once.

    nb_samples -- number of Sobol points (must be > 0; we divide by it).
    nus        -- collection of multi-indices; only its max degree is used
                  for the precomputation here.
    """
    max_degree = np.max(nus)
    cheb_evals = {}    # coordinate value -> [T_0(v), T_1(v), ..., T_max(v)]
    weights_eval = {}  # coordinate value -> Chebyshev weight at v
    int_val = 0  # running sum of integrand evaluations
    seed = 0  # required to step the Sobol sequence
    dim = 6  # hard-coded dimension ("that hurts!")
    for _ in xrange(0, nb_samples):
        [sample, new_seed] = i4_sobol(dim, seed)
        # Map the Sobol point from [0,1]^d to [-1,1]^d: sets the anchor at
        # (-1,...,-1) instead of the usual (0,...,0) for QMC methods.
        sample = sample * 2 - 1
        # BUG FIX: the original iterated `one_sample` (the integer loop
        # counter, which is not iterable) -- the coordinates live in `sample`.
        not_computed = [a_param for a_param in sample if a_param not in cheb_evals]
        for to_compute in not_computed:
            # T_j(v) via chebval with the unit coefficient vector e_j.
            to_add_to_dict = [1]
            for one_deg in xrange(1, max_degree + 1):
                to_add_to_dict.append(np.polynomial.chebyshev.chebval(
                    to_compute, np.hstack((np.zeros(one_deg), 1))))
            cheb_evals[to_compute] = to_add_to_dict
            weights_eval[to_compute] = np.polynomial.chebyshev.chebweight(to_compute)
        # NOTE(review): `a_nu` is not defined in this scope (NameError) --
        # presumably it should loop over `nus`; and `sample-1` shifts the
        # already-shifted sample into [-2,0] -- TODO confirm against ctaa's
        # expected domain. Left as in the original, pending clarification.
        int_val = int_val + ctaa(sample - 1, 1.0, 1.0 / 6, 5.) * mvc(sample, a_nu)
        seed = new_seed
    return int_val / nb_samples
# ---- Example 3 ----
def integrand(x0,x1,x2,x3,x4,x5,nu0,nu1,nu2,nu3,nu4,nu5, with_weights = True, rhs = 1., variability = 1.0/6., abar = 5.):
    """Pointwise integrand: the true-average factor ctaa at the 6-d point
    times the multivariate Chebyshev factor mvc for multi-index (nu0..nu5)."""
    point = [x0, x1, x2, x3, x4, x5]
    nu = np.array([nu0, nu1, nu2, nu3, nu4, nu5])
    avg_factor = ctaa([np.array(point)], rhs, variability, abar)
    return avg_factor * mvc(point, nu, with_weights)
# ---- Example 4 ----
def coefs_mpmath_wrapper(a_nu):
    """mpmath quadrature estimate of the coefficient for a_nu over [-1,1]^6.

    Uses the tanh-sinh rule, which tends to be more accurate when dealing
    with singularities. (mpmath seems unable to go past a few dimensions.)
    """
    mp.dps = 10  # 10 digits of accuracy should be more than enough

    def f(x0, x1, x2, x3, x4, x5):
        point = [x0, x1, x2, x3, x4, x5]
        return ctaa([np.array(point)], 1.0, 1.0 / 6.0, 5.0) * mvc(point, a_nu)

    box = [[-1, 1]] * 6  # one interval per coordinate
    return quad(f, *box, method='tanh-sinh')
# ---- Example 5 ----
def coefs_nquad_wrapper(a_nu):
    """scipy.integrate.nquad estimate of the coefficient for a_nu on [-1,1]^6."""
    def f(x0, x1, x2, x3, x4, x5):
        coords = [x0, x1, x2, x3, x4, x5]
        return ctaa([np.array(coords)], 1.0, 1.0 / 6.0, 5.0) * mvc(coords, a_nu)
    return spi.nquad(f, [[-1, 1]] * 6)
# sp.integrate.quad(f,a,b)
# cta(ys, rhs, variability, mean_field)

# Top-level driver fragment: fetch previously computed coefficients, then
# (intended) re-estimate each one by numerical integration and compare.
# NOTE(review): this fragment is incomplete -- see the notes below.
# first get the computed coefficients as well as the set of all potential candidate coefficients
computed_coefs, other_J = gcc(infile, None, J) # other_J should be the same as J.

# For every multi index in J,
# Compute (numerical integration) the estimated coefficient
# Compare! 
c_nus = np.zeros(len(other_J))
for idx, one_nu in enumerate(other_J):
    # Construct the current Chebychef polynomial with the associated weight
    for nu_j in one_nu:
        # NOTE(review): the next line is an unfinished statement -- `if nu_j`
        # is missing its condition completion, colon and body, so this is a
        # syntax error as written; the comparison logic was never finished.
        if nu_j

# NOTE(review): `a_nu` is not bound at this top level in the visible code, so
# both nquad calls below would raise NameError unless it is defined earlier
# in the full file. 13-dimensional integral over [-1,1]^13:
spi.nquad(lambda x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12: cta(np.array([x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12]), 1.0, 1.0/13, 5.0 )*mvc([x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12],a_nu),[[-1,1]]*13)
# compute_true_avg(ys, rhs, variability, mean_field):

# 2-d slice of the same integrand: only x0, x1 vary, the other 11
# coordinates are pinned to fixed values.
spi.nquad(lambda x0, x1: cta(np.array([x0,x1,0.01,-0.1,-0.02,0,0,0.02,0,0,0,0,0]).transpose(), 1.0, 1.0/13, 5.0 )*mvc([x0,x1,0.01,-0.1,-0.02,0,0,0.02,0,0,0,0,0],a_nu),[[-1,1]]*2)


import numpy
import scipy.integrate
import math

def w(r, theta, phi, alpha, beta, gamma):
    """Return -ln(theta * beta).

    Only theta and beta are used; r, phi, alpha and gamma are accepted
    but ignored (kept so the call signature matches the integrand).
    """
    product = theta * beta
    return -math.log(product)

def integrand(phi, alpha, gamma, r, theta, beta):
    ww = w(r, theta, phi, alpha, beta, gamma)
    k = 1.