def xmain():
    """Integrate a two-peak 5-D integrand, comparing vegas seeded with a
    sample-adapted map (live branch) against plain vegas adaptation."""
    dim = 5
    s = 50
    width = s / 2**.5 / 3  # common width of both Gaussian peaks
    g = Gaussians([
        (dim * [0.7], width),
        (dim * [0.45], width),
    ])
    f = Exponentials([
        (dim * [0.7], s),
        (dim * [0.45], s),
    ])
    print(f.g)
    neval = 1e4
    if True:
        # pre-adapt the grid to samples drawn from the Gaussian mixture
        samples = g.samples(2000)
        amap = vegas.AdaptiveMap(dim * [(0, 1)])
        amap.adapt_to_samples(samples, f, nitn=10)
        itg = vegas.Integrator(amap)
        r = itg(f, neval=neval, nitn=10, alpha=0.1)
        print(r.summary())
        itg.map.show_grid()
    else:
        # plain vegas: let the integrator adapt on its own, twice
        itg = vegas.Integrator(dim * [(0, 1)])
        w = itg(f, neval=neval, nitn=10, alpha=0.2)
        print(w.summary())
        r = itg(f, neval=neval, nitn=10, alpha=0.2)
        print(r.summary())
def main(alpha):
    """Run one variational-energy evaluation for trial parameters `alpha`.

    Copies up to three entries of `alpha` into the module globals
    alpha0/alpha1/alpha2 (read by the `expec`/`norm` integrands), then
    evaluates <E> = <expec> / <norm> with vegas.

    Fix: the third component used `len(alpha) == 3`, so a parameter vector
    with more than three entries silently skipped alpha2; use `>=` like
    the first two branches.
    """
    if len(alpha) >= 1:
        global alpha0
        alpha0 = alpha[0]
    if len(alpha) >= 2:
        global alpha1
        alpha1 = alpha[1]
    if len(alpha) >= 3:
        global alpha2
        alpha2 = alpha[2]
    start_time = time.time()
    # assign integration volume to integrator
    bound = 8
    dims = 6
    # creates symmetric bounds specified by [-bound, bound] in dims dimensions
    symm_bounds = dims * [[-bound, bound]]
    # simultaneously initialises expectation and normalisation integrals
    expinteg = vegas.Integrator(symm_bounds)
    norminteg = vegas.Integrator(symm_bounds)
    # adapt to the integrands; discard results
    expinteg(expec, nitn=5, neval=1000)
    norminteg(norm, nitn=5, neval=1000)
    # do the final integrals (neval taken from module-level iters[indx])
    expresult = expinteg(expec, nitn=10, neval=iters[indx])
    normresult = norminteg(norm, nitn=10, neval=iters[indx])
    E = expresult.mean / normresult.mean
    print(alpha, E)
    print("--- Iteration time: %s seconds ---" % (time.time() - start_time))
    return E
def _timed_run(label, integrand, dom, niters, nevals, nbatch=None):
    """Integrate `integrand` over `dom`, print the vegas summary, result
    and Q, then print the elapsed wall time tagged with `label`."""
    t0 = time.time()
    if nbatch is None:
        integ = vegas.Integrator(dom)
    else:
        # batched evaluation for vectorized integrands
        integ = vegas.Integrator(dom, nhcube_batch=nbatch)
    result = integ(integrand, nitn=niters, neval=nevals)
    print(result.summary())
    print('result = %s Q = %.2f' % (result, result.Q))
    print(label + ':', time.time() - t0)


def speed_test(cutoff, niters, nevals, nbatch):
    """Benchmark the pure-Python, Cython, batched-Cython and MPI+batched
    integrands on the same domain.  Decomposed from four copy-pasted
    timing stanzas into a single helper."""
    dom = domain(cutoff)
    # integrand.py (pure Python)
    _timed_run('Python time', f, dom, niters, nevals)
    # cython_integrand.integrand
    _timed_run('Cython time without vectorization', fc, dom, niters, nevals)
    # cython_integrand.cython_integrand (batched)
    _timed_run('Cython time with vectorization', fcbatch, dom, niters,
               nevals, nbatch)
    # cython_integrand.cython_integrand with MPI
    _timed_run('Cython time with MPI and vectorization', fmpi, dom, niters,
               nevals, nbatch)
def DQPM_visc(T, muB, muQ, muS, **kwargs):
    """
    calculate the viscosities

    Returns (eta, zeta): shear and bulk viscosities, each normalized by
    s*T**3.  T may be a single temperature or an array/list of
    temperatures (handled by elementwise recursion).
    """
    # single temperature value (generalized: accept plain/numpy ints and
    # numpy floats, not only Python floats)
    if isinstance(T, (int, float, np.integer, np.floating)):
        # integration limits
        pmin = 0.
        pmax = 15. * T
        # speed of sound squared
        cs2 = speed_sound(T, muB, muQ, muS, **kwargs)

        # on-shell integrands, one dict entry per parton species
        @vegas.batchintegrand
        def int_eta_on_all(x):
            return dict(
                zip(list_parton, [
                    parton.int_eta0(x[:, 0], T, muB, muQ, muS, **kwargs)
                    for parton in list_parton
                ]))

        @vegas.batchintegrand
        def int_zeta_on_all(x):
            return dict(
                zip(list_parton, [
                    parton.int_zeta0(x[:, 0], T, muB, muQ, muS, cs2, **kwargs)
                    for parton in list_parton
                ]))

        integ = vegas.Integrator([[pmin, pmax]])
        result_eta = integ(int_eta_on_all, nitn=10, neval=1000)
        integ = vegas.Integrator([[pmin, pmax]])
        result_zeta = integ(int_zeta_on_all, nitn=10, neval=1000)

        s = DQPM_s(T, muB, muQ, muS, **kwargs)
        eta = sum([result_eta[parton].mean
                   for parton in list_parton]) / (s * T**3)
        zeta = sum([result_zeta[parton].mean
                    for parton in list_parton]) / (s * T**3)
    # if the input is a list of temperature values
    elif isinstance(T, (np.ndarray, list)):
        # fix: force float dtype -- np.zeros_like on an integer array would
        # silently truncate the viscosities on assignment below
        eta = np.zeros_like(T, dtype=float)
        zeta = np.zeros_like(T, dtype=float)
        for i, xT in enumerate(T):
            eta[i], zeta[i] = DQPM_visc(xT, muB, muQ, muS, **kwargs)
    else:
        raise Exception('Problem with input')
    return eta, zeta
def main():
    """Array-valued multi-integrand example: integrate, derive <x> and
    sigma_x**2, and show the effect of dropping correlations."""
    print(
        gv.ranseed(
            (2050203335594632366, 8881439510219835677, 2605204918634240925)))
    log_stdout('eg3a.out')
    integrator = vegas.Integrator(4 * [[0, 1]])
    integrator(f(), nitn=10, neval=1000)  # adapt grid; result discarded
    res = integrator(f(), nitn=10, neval=5000)  # evaluate multi-integrands
    print('I[0] =', res[0], ' I[1] =', res[1], ' I[2] =', res[2])
    print('Q = %.2f\n' % res.Q)
    mean_x = res[1] / res[0]
    print('<x> =', mean_x)
    print('sigma_x**2 = <x**2> - <x>**2 =', res[2] / res[0] - mean_x**2)
    print('\ncorrelation matrix:\n', gv.evalcorr(res))
    unlog_stdout()
    # rebuild the same results without correlations to show their effect
    uncorr = gv.gvar(gv.mean(res), gv.sdev(res))
    print(uncorr[1] / uncorr[0])
    print((uncorr[1] / uncorr[0]).sdev / mean_x.sdev)
    print(uncorr[2] / uncorr[0] - (uncorr[1] / uncorr[0])**2)
    print(res.summary())
def cutoff_test(list_cutoffs, niters, nevals):
    """Measure how the L3 integral depends on the momentum cutoff and
    write the table to cutoff_dependence.txt."""
    rows = []
    for L in list_cutoffs:
        # Initialize the integrator for this cutoff
        integ = vegas.Integrator(domain(L))
        # Adaptation pass: refine the grid before recording results
        integ(fc, nitn=10, neval=nevals)
        # Final measurement
        result = integ(fc2, nitn=niters, neval=nevals)
        print('cutoff = %i result = %s Q = %.2f' % (L, result, result.Q))
        rows.append([L, result.mean, result.sdev, result.Q])
    table = np.array(rows)
    header = ('Dependence of L3 integral on momentum cutoff\n'
              'niters =' + str(niters) + ' nevals = ' + str(nevals) + '\n' +
              'cutoff integral stat.err. Q')
    with open("cutoff_dependence.txt", 'wb') as out:
        np.savetxt(out,
                   table,
                   fmt='%i %0.8f %0.8f %0.2f',
                   delimiter=',',
                   header=header)
def main():
    """Compare vegas and PyMikor on the 13-D corner-peak test integrand,
    printing the relative error of each against the exact result."""
    ndim = 13
    nods = 10009
    v_integ = vegas.Integrator(ndim * [[0, 1]])
    p_integ = PyMikor()
    p_integ.set_values(1, ndim, nods, 1, sigma=2)
    fof = Integrand('FCN', ndim)
    fof.normalize_a(2., 500)
    fof.show_parameters()
    # run both integrators on the same function
    v_result = v_integ(fof.corner_peak_fcn).mean
    p_result = p_integ(fof.corner_peak_fcn)  # , eps=1e-4
    exact_res = fof.exact_corner_peak()
    print(f'\nExact result : {exact_res:.12e}')
    v_rel_res = math.fabs((v_result - exact_res) / exact_res)
    print(f' Vegas : {v_result:.8e}')
    p_rel_res = math.fabs((p_result - exact_res) / exact_res)
    print(f' PyMikor : {p_result:.8e}')
    print(f'Relative res : {v_rel_res:.3e} : {p_rel_res:.3e}')
    del p_integ
def infoFig7b(samplesize, amax, rhomax, opt=None):
    """Integrate `infointFig7b` over the Fig. 7b parameter region.

    The integration is performed 10 times in order to train the integrator
    and then 10 times more in order to compute the actual values.
    Check the documentation of vegas for more information.

    Fix: `opt` used a mutable default argument (`{'xtol': 1E-4}`); replaced
    with the None-sentinel idiom.  NOTE(review): `opt` is currently unused
    in this body -- confirm whether it should be forwarded somewhere.
    """
    if opt is None:
        opt = {'xtol': 1E-4}
    integ = vegas.Integrator([[-5, 5], [-5, 5], [0.05, amax], [-.95, rhomax]])
    integ(infointFig7b, nitn=10, neval=samplesize)  # training pass
    return integ(infointFig7b, nitn=10, neval=samplesize)
def perform_vegas(integrand, bounds, phi, data, error, model, n_iter, n_eval):
    """Run vegas on a 7-component integrand and return normalized moments:
    the normalization 'z', fit quality 'Q', and per-parameter means and
    variances derived from components 1-6."""
    def wrapped(p):
        return integrand(phi, data, error, model, p)

    vegas_integrator = vegas.Integrator(bounds)
    # burning some
    vegas_integrator(wrapped, nitn=4, neval=1000)
    result = vegas_integrator(wrapped, nitn=n_iter, neval=n_eval)

    results = {'z': result[0].mean, 'Q': result.Q}
    # components 1-3: first moments of the parameters
    for i in (1, 2, 3):
        results['exp_par%d' % i] = result[i].mean / results['z']
    # components 4-6: second moments -> variances
    for i in (1, 2, 3):
        results['var_par%d' % i] = (result[i + 3].mean / results['z'] -
                                    results['exp_par%d' % i]**2)
    return results
def integrated_lognormal_3pt_corr_vegas(theta_scale_interval, patch_radius,
                                        log_shift, patch_area, theta_scale,
                                        N=10000):
    """vegas estimate of the patch-averaged lognormal 3-point correlation,
    binned in the angular-scale interval and normalized by the ring
    circumference at theta_scale and the squared patch area."""
    def integrand(x):
        theta_1, phi_1 = x[0], x[1]
        theta_2, phi_2 = x[2], x[3]
        theta_3, phi_3 = x[4], x[5]
        corr = lognormal_3pt_corr([theta_1, phi_1], [theta_2, phi_2],
                                  [theta_3, phi_3], log_shift)
        window = bin_angular_scale_interval([theta_1, phi_1],
                                            [theta_3, phi_3],
                                            theta_scale_interval)
        # sin factors: spherical area elements for points 1 and 2
        return np.sin(theta_1) * np.sin(theta_2) * corr * window

    # three copies of (polar in [0, patch_radius], azimuth in [0, 2*pi])
    integrator = vegas.Integrator(3 * [[0, patch_radius], [0, 2 * np.pi]])
    i_Xi = integrator(integrand, nitn=20, neval=N)
    # weighted mean over the total number of iterations, divided by the
    # ring circumference and the patch_area squared
    return i_Xi.mean / (2 * np.pi * np.sin(theta_scale)) / (patch_area**2)
def getpdf(bound, alpha, absv=0.4135889547408346):
    """Normalization integral of the squared helium trial wavefunction
    over [-bound, bound]^6, divided by `absv`; returns the vegas mean."""
    @vegas.batchintegrand
    def psi_squared(x):
        '''
        Squared trial wavefunction, used as denominator for variational integral
        '''
        # electron radii and inter-electron distance from the 6 coordinates
        r1_len = np.sqrt(x[:, 0]**2 + x[:, 1]**2 + x[:, 2]**2)
        r2_len = np.sqrt(x[:, 3]**2 + x[:, 4]**2 + x[:, 5]**2)
        r12 = np.sqrt((x[:, 0] - x[:, 3])**2 + (x[:, 1] - x[:, 4])**2 +
                      (x[:, 2] - x[:, 5])**2)
        trial = (np.exp(-2 * r1_len) * np.exp(-2 * r2_len) *
                 np.exp(r12 / (2 * (1 + alpha * r12))))
        return trial**2 / absv

    dims = 6
    # creates symmetric bounds specified by [-bound, bound] in dims dimensions
    symm_bounds = dims * [[-bound, bound]]
    norminteg = vegas.Integrator(symm_bounds)
    # adapt to the integrand; discard results
    norminteg(psi_squared, nitn=5, neval=1000)
    # do the final integral
    return norminteg(psi_squared, nitn=10, neval=1000000).mean
def integrate(N, nu, gamma, s1, s2, npoints):
    """Calculating prefactors, then evaluating the 9-D Cython integrand
    with vegas; returns a (5, 2) array of (mean, sdev) scaled by the
    prefactor."""
    An = An_calc(N)
    Pn = Pn_calc(N, gamma)
    Wn = Wn_calc(N)
    Sn = Sn_calc(N, gamma, nu, s1, s2)
    # three symmetric angles, three half-range angles, three symmetric angles
    half = pi / 2
    domain = (3 * [[-half, half]] + 3 * [[0, half]] + 3 * [[-half, half]])
    integrand = INT.f_cython(dim=9, N=N, nu=nu, gamma=gamma)
    integ = vegas.Integrator(domain, nhcube_batch=1000)
    integ(integrand, nitn=10, neval=npoints)  # adaptation pass
    vecresult = integ(integrand, nitn=10, neval=npoints)
    prefac = (Wn * 8.0 / 6.0) * (2 * pi**2 * An * Pn * Sn)
    retlist = np.zeros((5, 2))
    for i in range(vecresult.shape[0]):
        retlist[i, 0] = prefac * vecresult[i].mean
        retlist[i, 1] = prefac * vecresult[i].sdev
    return retlist
def xmain():
    """Integrate a 20-dim integrand whose two Gaussian peaks live in the
    first 5 dimensions, stratifying only those dimensions."""
    np.random.seed(123)
    dim = 20
    sdim = 5
    region = np.array(dim * [(0., 1.)])
    f = Gaussians([
        (sdim * [0.39] + (dim - sdim) * [0.45], 10.),
        (sdim * [0.74] + (dim - sdim) * [0.45], 10.),
    ])
    print(f.g)
    # compare a plain neval budget with explicit stratification: sdim
    # directions get 12 strata each, the remaining ones a single stratum
    for kargs in [
            dict(neval=2e6),
            dict(nstrat=sdim * [12] + (dim - sdim) * [1]),
    ]:
        itg = vegas.Integrator(region, alpha=0.25)
        itg(f, nitn=15, **kargs)  # adaptation pass; results discarded
        r = itg(f, nitn=5, **kargs)
        print(r.summary())
        print(list(itg.nstrat), list(itg.neval_hcube_range), r.sum_neval)
        print()
        itg.map.show_grid()
def Fn_vegas(self, omega, n, atom):
    """Monte-Carlo (vegas) estimate of the n-phonon function for `atom`
    at energy `omega`.

    Returns 0 when omega cannot produce n phonons, the (n-1)-dimensional
    vegas integral divided by n! for integer n > 1, and the single-phonon
    density-of-states term for n == 1; raises otherwise.

    NOTE(review): `isinstance(n, int)` rejects numpy integer types, which
    then fall through to the exception -- confirm callers pass plain ints.
    """
    # There is a minimum number of phonons for a given omega, return 0 if n isn't large enough
    minimum = np.floor(omega / self.dos_omega_range[1]) + 1
    if n < minimum:
        return 0
    # Compute integral if n > 1
    if (n > 1) and (isinstance(n, int)):
        # (n - 1)-dimensional domain, one copy of the DoS support per phonon
        integrationrange = (
            n - 1) * [[self.dos_omega_range[0], self.dos_omega_range[1]]]
        integ = vegas.Integrator(integrationrange)
        # first perform adaptation; we will throw away these results
        # This is discussed in https://vegas.readthedocs.io/en/latest/tutorial.html#basic-integrals, under "Early Iterations"
        integ(self.Fn_integrand(omega=omega, n=n, atom=atom),
              nitn=10,
              neval=1000)
        # Keep these results after adaptation
        result = integ(self.Fn_integrand(omega=omega, n=n, atom=atom),
                       nitn=10,
                       neval=1000)
        # print(result.summary())
        return gvar.mean(result) / factorial(n)
    elif n == 1:
        # note this will automatically return 0 if omega is outside the range for phonon_DoS
        if omega <= 0:
            return 0
        else:
            return self.DoS_interp[atom](omega) / omega
    else:
        raise Exception('n must be a nonnegative integer')
def proceed_err(ndim, nods, nt):
    """Append one row of relative-error measurements (vegas at two
    budgets, then PyMikor at each table prime in `nods`) for the
    oscillatory test integrand to data_f_oscillatory.csv."""
    row = [nt, ndim]
    p_integ = PyMikor()
    fof = Integrand('FCN', ndim)
    exact_res = fof.exact_oscillatory()
    v_integ = vegas.Integrator(ndim * [[0, 1]])
    # vegas at two sampling budgets, recorded as relative errors
    for budget in (1e3, 1e5):
        estimate = v_integ(fof.oscillatory_fcn, nitn=10, neval=budget).mean
        row.append(format_num(math.fabs((estimate - exact_res) / exact_res)))
    for tab_prime in nods:
        p_integ.set_values(1, ndim, tab_prime, 1, sigma=2)
        # p_result = p_integ(fof.oscillatory_fcn)  # , eps=1e-4
        p_result = 1.
        row.append(format_num(math.fabs((p_result - exact_res) / exact_res)))
    with open('data_f_oscillatory.csv', 'a', newline='') as out:
        csv.writer(out, delimiter=',', quoting=csv.QUOTE_ALL).writerow(row)
    del p_integ, v_integ
def double_integrate_for_shot(self, a, K, mu_sign, minq, maxq,
                              mu_sign_prime, minq_prime, maxq_prime,
                              function, vegas_mode=False):
    """6-D double integral for one shot, evaluated with vegas.

    NOTE(review): only the vegas path is implemented in this body -- with
    vegas_mode=False the method falls through and returns None; confirm
    callers either never hit that case or handle it.  Also `minq_prime`
    and `maxq_prime` are accepted but unused: the second radial axis
    reuses [minq, maxq] -- TODO confirm that is intentional.
    """
    if vegas_mode:
        nitn = 100
        neval = 1000
        # wrap the raw integrand with the outer-integral vegas adapter
        function = self._double_outer_integral_vegas_for_g(
            function, self.f, K, mu_sign, mu_sign_prime, a)
        # domain: two angles in [0, pi], two in [0, 2*pi], two radial axes
        integ = vegas.Integrator(
            [[0, np.pi], [0, np.pi], [0, 2 * np.pi], [0, 2 * np.pi],
             [minq, maxq], [minq, maxq]],
            nhcube_batch=1000)
        result = integ(function, nitn=nitn, neval=neval)
        integral = result.mean
        return integral
def _drr(self, j, omega, lnnp, comp, neval=1e3, seed=None):
    """vegas estimate of the resonant contribution for mode (l, n, n')
    at angular momentum `j` and frequency `omega` for component `comp`.

    Returns a length-2 array (as produced by `integrate`), scaled by the
    mode prefactor; zeros when no resonance interpolator is available.
    """
    l, n, n_p = lnnp
    ratio = n / n_p
    # interpolator mapping final sma -> resonant j_f at this frequency;
    # may be None (handled below)
    get_jf = self._res_intrp(ratio)(omega * ratio)
    if seed is not None:
        # make a unique seed
        np.random.seed([seed, l, n, 2 * l + n_p])
    pi = np.pi

    @vegas.batchintegrand
    def c_lnnp(x):
        # leading columns: true anomalies scaled to [0, pi];
        # last column: uniform variate mapped to a semi-major axis
        true_anomaly = x[:, :-1].T * pi
        sma_f = self.inverse_cumulative_a(x[:, -1], comp)
        jf = get_jf(sma_f)
        res = np.zeros(x.shape[0], float)
        # only points with a physical resonant jf (> 0) contribute
        ix = np.where(jf > 0)[0]
        if len(ix) > 0:
            res[ix] = self._integrand(j, sma_f[ix], jf[ix], lnnp,
                                      true_anomaly[:, ix])
        return res

    # keep a handle on the last integrand (presumably for debugging/
    # inspection -- confirm)
    self.c_lnnp = c_lnnp
    integ = vegas.Integrator(5 * [[0, 1]])
    if get_jf is None:
        # no resonance at this omega: the integral vanishes
        result = np.zeros(2)
    else:
        result = np.array(integrate(c_lnnp, integ, neval))
    return result * (8 * pi * n**2 / abs(n_p) * _a2_norm_factor(*lnnp) *
                     self.nu_r(self.sma)**2 /
                     (self.mbh_mass / self.star_mass[comp])**2.0 *
                     self.total_number_of_stars(comp))
def analyze_theory(V, x0list=None, plot=False):
    """
    Extract ground-state energy E0 and psi**2 for potential V.

    Fixes: `x0list` used a mutable default argument (`[]`); replaced with
    the None-sentinel idiom.  The training pass passed `nitn=nitn / 2`
    (a float under true division); use integer division since nitn is an
    iteration count.
    """
    if x0list is None:
        x0list = []
    # initialize path integral
    T = 4.
    ndT = 8.  # use larger ndT to reduce discretization error (goes like 1/ndT**2)
    neval = 3e5  # should probably use more evaluations (10x?)
    nitn = 6
    alpha = 0.1  # damp adaptation
    # create integrator and train it (no x0list)
    integrand = PathIntegrand(V=V, T=T, ndT=ndT)
    integ = vegas.Integrator(integrand.region, alpha=alpha)
    integ(integrand, neval=neval, nitn=nitn // 2, alpha=2 * alpha)
    # evaluate path integral with trained integrator and x0list
    integrand = PathIntegrand(V=V, x0list=x0list, T=T, ndT=ndT)
    results = integ(integrand, neval=neval, nitn=nitn, alpha=alpha)
    print(results.summary())
    E0 = -np.log(results['exp(-E0*T)']) / T
    print('Ground-state energy = %s Q = %.2f\n' % (E0, results.Q))
    if len(x0list) <= 0:
        return E0
    # normalized wavefunction-squared values at the requested x0 points
    psi2 = results['exp(-E0*T) * psi(x0)**2'] / results['exp(-E0*T)']
    print('%5s %-12s %-10s' % ('x', 'psi**2', 'sho-exact'))
    print(27 * '-')
    for x0i, psi2i in zip(x0list, psi2):
        exact = np.exp(-x0i**2) / np.sqrt(np.pi)  # * np.exp(-T / 2.)
        print("%5.1f %-12s %-10.5f" % (x0i, psi2i, exact))
    if plot:
        plot_results(E0, x0list, psi2, T)
    return E0
def CalcDiscEDFNorm(self, Jscale): """ NORMALIZATION FOR TOTAL DISC EDF Arguments: Jscale - scale for actions (km/s kpc, [scalar]) Returns: Total disc normalization. """ # Transformed disc EDF @vegas.batchintegrand def TransDiscEDF(scaledstar): # Transform back coordinates Jr = Jscale * np.arctanh(scaledstar[:, 0]) Jz = Jscale * np.arctanh(scaledstar[:, 1]) Lz = Jscale * np.arctanh(scaledstar[:, 2]) feh = scaledstar[:, 3] afe = scaledstar[:, 4] # Transformation Jacobian jac = Jscale**3./(1.-(np.tanh(Jr/Jscale))**2.)/\ (1.-(np.tanh(Jz/Jscale))**2.)/\ (1.-(np.tanh(Lz/Jscale))**2.) # Create untransformed coordinate arrays acts = np.column_stack((Jr, Jz, Lz)) xi = np.column_stack((feh, afe, scaledstar[:, 5])) return (jac * 8. * np.pi**3. * (self.ThinDiscEDF(acts, xi) + self.ThickDiscEDF(acts, xi))) # Limits fehmin = -2.0 fehmax = 1.0 afemin = -1.0 afemax = 1.0 # Create integration object integ = vegas.Integrator([[0., 1.], [0., 1.], [0., 1.], [fehmin, fehmax], [afemin, afemax], [0., self.sfr_par["taum"]]]) # Train integration object integ(TransDiscEDF, nitn=5, neval=1000) # Calculate integration result = integ(TransDiscEDF, nitn=self.niter, neval=self.maxeval) val = result.mean err = result.sdev print("Disc EDF normalization and error: " + str(np.round(val, 5)) + "+/" + str(np.round(err, 4))) pererr = err / val * 100. print("% error = " + str(np.round(pererr, 2))) return (1. / val)
def __call__(self, problem, return_N=False, return_all=False):
    """Integrate problem.pdf over [-1, 1]^D with vegas.

    Returns the mean estimate G, or the tuple (G, total evaluations
    N * NITN) when either `return_N` or `return_all` is set.
    """
    integ = vegas.Integrator([[-1.0, 1.0]] * problem.D)
    f = ShapeAdapter(problem.pdf)
    G = integ(f, nitn=self.NITN, neval=self.N).mean
    if return_N or return_all:
        return G, self.N * self.NITN
    return G
def vint3(d):
    """Integrate vint2(c, d) over c in [0, 1] with vegas; bumps and prints
    the module-level completion counter."""
    integ = vegas.Integrator([[0, 1]])
    value = integ(lambda c: vint2(c, d), nitn=10, neval=1000).mean
    global count
    count += 1
    print('int3 done ', count)
    return value
def main():
    """Compare vegas from a uniform map, vegas from a sample-trained map,
    and plain simple Monte Carlo with/without that map."""
    dim = 5
    log_stdout('eg5a.out')
    np.random.seed(123)
    # vegas starting from an untrained (uniform) map
    amap = vegas.AdaptiveMap(dim * [(0, 1)])
    itg = vegas.Integrator(amap, alpha=0.1)
    r = itg(f, neval=1e4, nitn=5)
    print(r.summary())
    unlog_stdout()

    log_stdout('eg5b.out')
    np.random.seed(1234567)
    # vegas starting from a map pre-adapted to samples around both peaks
    amap = vegas.AdaptiveMap(dim * [(0, 1)])
    x = np.concatenate([
        np.random.normal(loc=0.45, scale=3 / 50, size=(1000, dim)),
        np.random.normal(loc=0.7, scale=3 / 50, size=(1000, dim)),
    ])
    amap.adapt_to_samples(x, f, nitn=5)
    itg = vegas.Integrator(amap, alpha=0.1)
    r = itg(f, neval=1e4, nitn=5)
    print(r.summary())
    unlog_stdout()

    log_stdout('eg5c.out')
    np.random.seed(123)

    def smc(fcn, neval, dim):
        " integrates f(y) over dim-dimensional unit hypercube "
        y = np.random.uniform(0, 1, (neval, dim))
        fy = fcn(y)
        return (np.average(fy), np.std(fy) / neval**0.5)

    def g(y):
        # push uniform samples through the trained map, weighting by the
        # map's Jacobian
        jac = np.empty(y.shape[0], float)
        x = np.empty(y.shape, float)
        amap.map(y, x, jac)
        return jac * f(x)

    # with map
    mean, err = smc(g, 50_000, dim)
    print(' SMC + map:', f'{mean:.3f} +- {err:.3f}')
    # without map
    mean, err = smc(f, 50_000, dim)
    print('SMC (no map):', f'{mean:.3f} +- {err:.3f}')
def dIdE_mc_vegas(self, E, nitn=10, neval=1e4):
    """vegas Monte-Carlo estimate of dI/dE at energy `E`, integrating
    `self.dIdE_integrand` over (Gamma, L, z); prints the vegas summary
    and returns the mean.

    Fix: `print result.summary()` was a Python 2 print statement; the
    rest of this codebase uses Python 3 syntax.
    """
    # the integrand reads the energy from the instance
    self.E = E
    integ = vegas.Integrator([[self.Gammamin, self.Gammamax],
                              [self.Lmin, self.Lmax],
                              [self.zmin, self.zmax]])
    result = integ(self.dIdE_integrand, nitn=nitn, neval=neval)
    print(result.summary())
    return result.mean
def main():
    """Integrate the f2py-wrapped Fortran integrand over [0, 1]^4, either
    batched or scalar depending on USE_BATCH; each variant runs twice
    (adaptation, then measurement)."""
    integ = vegas.Integrator(4 * [[0, 1]])
    if USE_BATCH:
        fcn = vegas.batchintegrand(ffcn_f2py.batch_fcn)
    else:
        fcn = ffcn_f2py.fcn
    print(integ(fcn, neval=1e5, nitn=10).summary())
    print(integ(fcn, neval=1e5, nitn=10).summary())
def compute_integral(dim, sigma0, sigma1):
    """vegas estimate (mean) of `integrand` over [0, 10] x [-3, 3] for the
    given dimension and widths; the first run only adapts the grid."""
    @vegas.batchintegrand
    def wrapped(x):
        return integrand(x, dim, sigma0, sigma1)

    integ = vegas.Integrator([[0, 10], [-3, 3]])
    budget = 10000000
    integ(wrapped, nitn=10, neval=budget)  # adaptation pass
    return integ(wrapped, nitn=10, neval=budget).mean
def calc_path_integral(m, x_i, x_f, T, N, V_fn):
    """Evaluate the discretized path integral from x_i to x_f in time T
    with N time slices; returns (mean, sdev) of the vegas estimate."""
    a = T / N  # lattice spacing
    span = 3  # half-width of each intermediate-coordinate integral
    integrator = vegas.Integrator((N - 1) * [(-span, span)])
    I_fn = make_path_integrant(m, a, N, x_i, x_f, V_fn)
    integrator(I_fn, nitn=100, neval=1000)  # adaptation pass
    res = integrator(I_fn, nitn=10, neval=10000)
    return res.mean, res.sdev
def main():
    """Optimize an affine change of variables (M, t) to minimize the MC
    variance of `fcn`, then compare naive MC and vegas on the original
    and transformed integrands.

    Fix: this block still used Python 2 print statements while the rest
    of the file is Python 3; also `neval /= 10` would yield a float under
    true division, so use integer division for the evaluation count.
    """
    neval = 100000
    invdet_fac = 1.
    region = ndim * [(0, 1.)]
    # naive MC evaluation of the original fcn
    print(vegas.Integrator(region)(fcn, nitn=1, max_nhcube=1, neval=neval))

    # optimize M and t: objective is the variance proxy <aff**2> plus a
    # penalty that keeps det(M) away from zero
    def fcn2(z):
        M = np.asarray(z[:-ndim])
        M.shape = ndim, ndim
        t = np.asarray(z[-ndim:])
        aff = AffineFunction(M, t, region, fcn)

        @vegas.batchintegrand
        def aff2(x):
            return aff(x)**2

        gv.ranseed(1)
        ans = vegas.Integrator(region)(aff2, max_nhcube=1, nitn=1,
                                       neval=neval)
        # print('***', ans)
        return ans.mean + invdet_fac / abs(
            np.linalg.det(M))  # 1000. * (1. - np.linalg.det(M)) ** 2

    z0 = np.array(np.diag(ndim * [1.]).flatten().tolist() + ndim * [0.])
    Mmin = lsqfit.gsl_multiminex(z0, fcn2, tol=1e-1, maxit=1000)
    M, t = Mmin.x[:-ndim], Mmin.x[-ndim:]
    print('nit', Mmin.nit)
    M.shape = (ndim, ndim)
    print('M', np.linalg.det(M))
    print(M)
    print('t', t)
    # naive MC evaluation in the new space
    aff = AffineFunction(M, t, region, fcn)
    print(vegas.Integrator(region)(aff, nitn=1, max_nhcube=1, neval=neval))
    neval //= 10
    # vegas integration of original function
    print(vegas.Integrator(region)(fcn, neval=neval, alpha=0.2).summary())
    # vegas integration of transformed function
    print(vegas.Integrator(region)(aff, neval=neval, alpha=0.2).summary())
def dIdE_mc_vegas(self, E, nitn=15, neval=2e4, verbose=False):
    """vegas Monte-Carlo estimate of dI/dE at energy `E`, integrating
    `self.dIdE_integrand` over (Gamma, L, z); optionally prints the
    vegas summary and returns the mean.

    Fix: `print result.summary()` was a Python 2 print statement; the
    rest of this codebase uses Python 3 syntax.
    """
    # the integrand reads the energy from the instance
    self.E = E
    integ = vegas.Integrator([[self.Gammamin, self.Gammamax],
                              [self.Lmin, self.Lmax],
                              [self.zmin, self.zmax]])
    result = integ(self.dIdE_integrand, nitn=nitn, neval=neval)
    if verbose:
        print(result.summary())
    return result.mean
def compute_integral(self, dim, sigma0, sigma1, eps, k1, log_k2, support_r):
    """vegas integral of `self.integrand` over support_r x [-2, 2] using
    the instance's iteration/sample budget; the first run only adapts
    the grid.  Returns the full vegas result object."""
    @vegas.batchintegrand
    def wrapped(x):
        return self.integrand(x, dim, sigma0, sigma1, eps, k1, log_k2)

    integ = vegas.Integrator([support_r, [-2, 2]])
    integ(wrapped, nitn=self.niter, neval=self.sample)  # adaptation pass
    return integ(wrapped, nitn=self.niter, neval=self.sample)
def main():
    """Multi-integrand example: array-valued integrand first, then the
    same quantities via a dictionary-valued integrand; also shows the
    effect of dropping correlations from the results."""
    print(gv.ranseed((1814855126, 100213625, 262796317)))
    log_stdout('eg3a.out')
    integrator = vegas.Integrator(4 * [[0, 1]])
    integrator(f(), nitn=10, neval=2000)  # adapt grid; result discarded
    res = integrator(f(), nitn=10, neval=10000)  # evaluate multi-integrands
    print('I[0] =', res[0], ' I[1] =', res[1], ' I[2] =', res[2])
    print('Q = %.2f\n' % res.Q)
    mean_x = res[1] / res[0]
    var_x = res[2] / res[0] - mean_x**2
    print('<x> =', mean_x)
    print('sigma_x**2 = <x**2> - <x>**2 =', var_x)
    print('\ncorrelation matrix:\n', gv.evalcorr(res))
    unlog_stdout()
    # rebuild the same results without correlations to show their effect
    uncorr = gv.gvar(gv.mean(res), gv.sdev(res))
    print(uncorr[1] / uncorr[0])
    print((uncorr[1] / uncorr[0]).sdev / mean_x.sdev)
    print(uncorr[2] / uncorr[0] - (uncorr[1] / uncorr[0])**2)
    print((uncorr[2] / uncorr[0] -
           (uncorr[1] / uncorr[0])**2).sdev / var_x.sdev)
    print(res.summary())

    # do it again for a dictionary-valued integrand
    print(gv.ranseed((1814855126, 100213625, 262796317)))
    integrator = vegas.Integrator(4 * [[0, 1]])
    integrator(f(), nitn=10, neval=2000)  # adapt grid
    dres = integrator(fdict(), nitn=10, neval=10000)  # evaluate integrals
    log_stdout('eg3b.out')
    print(dres)
    print('Q = %.2f\n' % dres.Q)
    print('<x> =', dres['x'] / dres['1'])
    print('sigma_x**2 = <x**2> - <x>**2 =',
          dres['x**2'] / dres['1'] - (dres['x'] / dres['1'])**2)
    unlog_stdout()