def test_ret_arr_args(self):
    """ Test an integrand that returns an array with an argument. """
    func = lambda x, a, b: np.array((a * x**2, b * x**3))
    npoints = 2000
    aval, bval = 4., 5.
    (res_sq, res_cb), (sd_sq, sd_cb) = mcimport(
        func, npoints, distribution=exponential,
        nprocs=1, seed=123456, args=(aval, bval))
    res_sq2, sd_sq2 = mcimport(
        lambda x, a: a * x**2, npoints, distribution=exponential,
        nprocs=1, seed=123456, args=(aval,))
    res_cb2, sd_cb2 = mcimport(
        lambda x, b: b * x**3, npoints, distribution=exponential,
        nprocs=1, seed=123456, args=(bval,))
    assert_almost_equal(res_sq, res_sq2)
    assert_almost_equal(res_cb, res_cb2)
    assert_almost_equal(sd_sq, sd_sq2)
    assert_almost_equal(sd_cb, sd_cb2)
def test_ret_arr(self):
    """ Test an integrand that returns an array. """
    func = lambda x: np.array((x**2, x**3))
    npoints = 2000
    (res_sq, res_cb), (sd_sq, sd_cb) = mcimport(
        func, npoints, distribution=exponential, nprocs=1, seed=123456)
    res_sq2, sd_sq2 = mcimport(
        lambda x: x**2, npoints, distribution=exponential,
        nprocs=1, seed=123456)
    res_cb2, sd_cb2 = mcimport(
        lambda x: x**3, npoints, distribution=exponential,
        nprocs=1, seed=123456)
    assert_almost_equal(res_sq, res_sq2)
    assert_almost_equal(res_cb, res_cb2)
    assert_almost_equal(sd_sq, sd_sq2)
    assert_almost_equal(sd_cb, sd_cb2)
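# The two tests above pin down component-wise behaviour for array-valued
# integrands. A minimal standalone sketch of the same idea, assuming that
# `exponential` here is numpy.random.exponential (any callable accepting a
# `size` keyword can serve as mcimport's `distribution`): under Exp(1) the
# second and third moments are 2 and 6, so both components of the estimate
# should land close to those values.
import numpy as np
from numpy.random import exponential
from skmonaco import mcimport

(m2, m3), (sd2, sd3) = mcimport(lambda x: np.array((x**2, x**3)), 100000,
                                distribution=exponential, seed=123456)
print(m2, "+/-", sd2)   # ~2.0
print(m3, "+/-", sd3)   # ~6.0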
def test_seed_different(self):
    """ Test different seed -> different result. """
    npoints = 50000
    res, error = mcimport(lambda x: x < 1.0, npoints, exponential,
                          seed=[1234, 5678])
    res2, error2 = mcimport(lambda x: x < 1.0, npoints, exponential,
                            seed=[1235, 5678])
    assert res != res2
    assert error != error2
def test_seed(self):
    """ Test same seed -> same result. """
    npoints = 50000
    res, error = mcimport(lambda x: x < 1.0, npoints, exponential,
                          seed=[1234, 5678])
    res2, error2 = mcimport(lambda x: x < 1.0, npoints, exponential,
                            seed=[1234, 5678])
    assert res == res2
    assert error == error2
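# The seeding contract exercised above, as a standalone sketch (same
# assumption that `exponential` is numpy.random.exponential, so the
# integrand x < 1.0 estimates P(X < 1) = 1 - exp(-1) ~ 0.632 under Exp(1)).
import numpy as np
from numpy.random import exponential
from skmonaco import mcimport

est_a, err_a = mcimport(lambda x: x < 1.0, 50000, exponential, seed=[1234, 5678])
est_b, err_b = mcimport(lambda x: x < 1.0, 50000, exponential, seed=[1234, 5678])
est_c, err_c = mcimport(lambda x: x < 1.0, 50000, exponential, seed=[1235, 5678])

assert est_a == est_b             # identical seed -> identical estimate
assert est_a != est_c             # different seed -> different estimate
print(est_a, 1 - np.exp(-1))      # both close to 0.632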
def run_parallel(self, f, npoints, distribution,
                 expected_value, expected_variance, **kwargs):
    res, sd = mcimport(f, npoints, distribution, nprocs=2, **kwargs)
    error = np.sqrt(expected_variance / float(npoints))
    assert_within_tol(res, expected_value, 3. * max(error, 1e-10),
                      "Error in <f> in parallel run.")
    assert_within_tol(sd, error, 0.1 * max(error, 1e-10),
                      "Error in expected error in parallel run.")
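# Standalone analogue of run_parallel (assumptions: Exp(1) sampler, so for
# f(x) = x**2 the expected value is 2 and the variance is 4! - 2**2 = 20;
# the reported standard deviation should approach sqrt(20 / npoints)).
# The __main__ guard matters because nprocs > 1 spawns worker processes.
import numpy as np
from numpy.random import exponential
from skmonaco import mcimport

if __name__ == "__main__":
    npoints = 100000
    res, sd = mcimport(lambda x: x**2, npoints, exponential,
                       nprocs=2, seed=[1234, 5678])
    print(res, "expected ~2.0")
    print(sd, "expected ~", np.sqrt(20.0 / npoints))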
def run_check_seeded_distribution(self, f, ntrials, *args, **kwargs):
    """
    Check that the results returned by integrating f are normally
    distributed. Seeds each trial with the trial number.
    """
    import scipy.stats
    results, errors = [], []
    for itrial in range(ntrials):
        res, err = mcimport(f, *args, seed=itrial, **kwargs)
        results.append(res)
        errors.append(err)
    results = np.array(results).flatten()
    w, p = scipy.stats.shapiro(results)
    self.assertGreater(p, 0.1)
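# A standalone version of that normality check (assumptions: Exp(1) sampler,
# scipy available, 200 cheap trials seeded 0..199). By the central limit
# theorem the per-trial estimates should look Gaussian, so the Shapiro-Wilk
# p-value should usually sit comfortably above 0.1.
import numpy as np
import scipy.stats
from numpy.random import exponential
from skmonaco import mcimport

estimates = [mcimport(lambda x: x**2, 2000, exponential, seed=trial)[0]
             for trial in range(200)]
w, p = scipy.stats.shapiro(np.asarray(estimates))
print(w, p)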
def gen_err_mcquad(self):
    # Importance-sampled estimate of the integral; return its deviation
    # from 1/2 (written as 0.5 to avoid Python 2 integer truncation).
    result, error1 = mcimport(self.integrand_mcquad, self.N_iter_MCquad,
                              self.sampler_mcquad, nprocs=self.NProcs)
    return 0.5 - result
def Bayes_score():
    """
    Calculate the Bayes factor score for a specific yield set and choice of
    error parameter, as defined in the parameter file.

    First, MCMC is run to determine the centre of the parameter space, and
    then the integration is performed. This needs a trained neural network
    in the Neural/ folder.

    Output is the Bayes score and its predicted (1 sigma) error.
    """
    from .parameter import ModelParameters
    from .cem_function import posterior_function_mcmc_quick
    from .score_function import preload_params_mcmc
    from .plot_mcmc import restructure_chain
    from .wrapper import single_star_optimization
    from scipy.stats import multivariate_normal as scinorm
    from numpy.random import multivariate_normal as numnorm
    from skmonaco import mcimport
    import time

    # Load model parameters
    a = ModelParameters()
    preload = preload_params_mcmc()
    init_time = time.time()

    # Compute posterior + load median values - this automatically uses the neural network!!
    print('After %.3f seconds, finding posterior parameter values' % (time.time() - init_time))
    single_star_optimization()
    restructure_chain('mcmc/')
    positions = np.load('mcmc/posteriorPDF.npy')
    init_param = []
    for j in range(len(a.p0)):
        init_param.append(np.percentile(positions[:, j], 50))
    print('After %.3f seconds, initial parameters are:' % (time.time() - init_time), init_param)

    def posterior(theta):
        # Compute the posterior (needs a trained neural network)
        a = ModelParameters()
        post, _ = posterior_function_mcmc_quick(theta, a, preload)
        return np.exp(post)

    # Read prior sigma from the parameter file
    sigma = []
    for i, param_name in enumerate(a.to_optimize):
        sigma.append(a.priors.get(param_name)[1])
    sigma = np.array(sigma)

    # Compute covariance matrix of the posterior samples
    print('After %.3f seconds, computing covariance matrix' % (time.time() - init_time))
    positions = np.load('mcmc/posteriorPDF.npy')
    cov_matrix = np.zeros((len(a.p0), len(a.p0)))
    for i in range(len(a.p0)):
        for j in range(len(a.p0)):
            cov_matrix[i, j] = np.cov((positions[:, i], positions[:, j]))[1, 0]

    def gauss_factor(theta):
        # Gaussian fit to the posterior samples
        return scinorm.pdf(theta, mean=np.array(init_param), cov=cov_matrix)

    def posterior_mod(theta):
        # Posterior flattened by the Gaussian importance distribution
        return posterior(theta) / gauss_factor(theta)

    def dist(size):
        # Sampling distribution for the importance-sampled integration
        mean = np.array(init_param)
        return numnorm(mean, cov_matrix, size=size)

    if 'beta_param' in a.to_optimize or 'log10_beta' in a.to_optimize:
        # don't save output here
        print('After %.3f seconds, starting parameter-space integration'
              % (time.time() - init_time))
        integral, integral_err = mcimport(posterior_mod, a.int_samples, dist,
                                          nprocs=4)  # Quad-core processing
    else:
        print('After %.3f seconds, starting parameter-space integration for beta = %.3f'
              % (time.time() - init_time, a.beta_param))
        integral, integral_err = mcimport(posterior_mod, a.int_samples, dist,
                                          nprocs=4)  # Quad-core processing
    print('After %.3f seconds, integration is complete' % (time.time() - init_time))
    np.save('Scores/integral_' + str(a.beta_param) + '.npy', integral)
    np.save('Scores/integral_err_' + str(a.beta_param) + '.npy', integral_err)

    return integral, integral_err
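# The integration step above is Gaussian importance sampling of the Bayesian
# evidence: mcimport averages posterior_mod over draws from dist, which
# converges to the integral of posterior(theta) over the parameter space.
# A self-contained toy version with a known answer (all names and numbers
# here are illustrative, not Chempy's): the unnormalised target
# exp(-|theta|^2) integrates to pi over R^2.
import numpy as np
from numpy.random import multivariate_normal as numnorm
from scipy.stats import multivariate_normal as scinorm
from skmonaco import mcimport

mean = np.zeros(2)
cov = np.eye(2)

def toy_posterior(theta):
    return np.exp(-np.dot(theta, theta))           # unnormalised target

def toy_gauss_factor(theta):
    return scinorm.pdf(theta, mean=mean, cov=cov)  # importance density

def toy_posterior_mod(theta):
    return toy_posterior(theta) / toy_gauss_factor(theta)

def toy_dist(size):
    return numnorm(mean, cov, size=size)           # sampler for mcimport

evidence, evidence_err = mcimport(toy_posterior_mod, 100000, toy_dist, nprocs=1)
print(evidence, "+/-", evidence_err, "exact:", np.pi)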