def test_init(self):
    """
    Test initialization of :class:`bet.sampling.basicSampling.sampler`.
    """
    assert self.samplers[0].num_samples == 100
    assert self.samplers[0].lb_model == self.models[0]
    # A sampler constructed with num_samples=None stores None; compare
    # with ``is`` because None is a singleton (PEP 8), not with ``==``.
    assert bsam.sampler(self.models[0], None).num_samples is None
def test_init(self):
    """
    Verify initialization of
    :class:`bet.sampling.basicSampling.sampler`.
    """
    first = self.samplers[0]
    assert first.num_samples == 100
    assert first.lb_model == self.models[0]
    # A sampler built without a sample count stores None.
    assert bsam.sampler(self.models[0], None).num_samples is None
def setUp(self):
    """
    Build a 3-input / 2-output discretization of ``linear_model1`` with
    error estimates and Jacobians, then wrap it in a piecewise
    polynomial surrogate.
    """
    reference_input = np.array([0.5, 0.5, 0.5])
    reference_output = linear_model1(reference_input)
    my_sampler = bsam.sampler(linear_model1)
    in_set = sample.sample_set(3)
    in_set.set_domain(np.repeat([[0.0, 1.0]], 3, axis=0))
    in_set = my_sampler.random_sample_set('random', in_set,
                                          num_samples=1E2)
    disc = my_sampler.compute_QoI_and_create_discretization(
        in_set, globalize=True)
    simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
        data_set=disc, Q_ref=reference_output, rect_scale=0.5)
    num = disc.check_nums()
    disc._output_sample_set.set_error_estimates(0.01 * np.ones((num, 2)))
    # Constant Jacobian of the linear map, replicated for every sample.
    jac = np.zeros((num, 2, 3))
    jac[:, :, :] = np.array([[0.506, 0.463], [0.253, 0.918],
                             [0.085, 0.496]]).transpose()
    disc._input_sample_set.set_jacobians(jac)
    self.sur = surrogates.piecewise_polynomial_surrogate(disc)
def setUp(self):
    """
    Create test maps of several input/output dimensions (1-1, 3-1, 3-2,
    10-4) and one :class:`bsam.sampler` per map.
    """
    # create 1-1 map
    self.param_min1 = np.zeros((1, ))
    # Upper bound of the 1-D parameter domain.  Was ``np.zeros``, which
    # made the domain degenerate ([0, 0]) and inconsistent with the
    # 3-D and 10-D cases below, which all use [0, 1].
    self.param_max1 = np.ones((1, ))

    def map_1t1(x):
        return np.sin(x)
    # create 3-1 map
    self.param_min3 = np.zeros((3, ))
    self.param_max3 = np.ones((3, ))

    def map_3t1(x):
        return np.sum(x, 1)
    # create 3-2 map

    def map_3t2(x):
        return np.vstack(([x[:, 0] + x[:, 1], x[:, 2]])).transpose()
    # create 10-4 map
    self.param_min10 = np.zeros((10, ))
    self.param_max10 = np.ones((10, ))

    def map_10t4(x):
        x1 = x[:, 0] + x[:, 1]
        x2 = x[:, 2] + x[:, 3]
        x3 = x[:, 4] + x[:, 5]
        x4 = np.sum(x[:, [6, 7, 8, 9]], 1)
        return np.vstack([x1, x2, x3, x4]).transpose()
    num_samples = 100
    self.savefiles = ["11t11", "1t1", "3to1", "3to2", "10to4"]
    self.models = [map_1t1, map_1t1, map_3t1, map_3t2, map_10t4]
    self.samplers = []
    for model in self.models:
        self.samplers.append(bsam.sampler(model, num_samples))
def setUp(self):
    """
    Create test maps (1-1, 3-1, 3-2, 10-4), one sampler per map, and a
    collection of input sample sets with and without explicit domains.
    """
    # 1-1 map on the unit interval
    self.input_domain1 = np.column_stack((np.zeros((1, )),
                                          np.ones((1, ))))

    def map_1t1(x):
        return np.sin(x)

    # 3-1 map on the unit cube
    self.input_domain3 = np.column_stack((np.zeros((3, )),
                                          np.ones((3, ))))

    def map_3t1(x):
        return np.sum(x, 1)

    # 3-2 map
    def map_3t2(x):
        return np.vstack(([x[:, 0] + x[:, 1], x[:, 2]])).transpose()

    # 10-4 map on the unit hypercube
    self.input_domain10 = np.column_stack((np.zeros((10, )),
                                           np.ones((10, ))))

    def map_10t4(x):
        first = x[:, 0] + x[:, 1]
        second = x[:, 2] + x[:, 3]
        third = x[:, 4] + x[:, 5]
        fourth = np.sum(x[:, [6, 7, 8, 9]], 1)
        return np.vstack([first, second, third, fourth]).transpose()

    sample_count = 100
    self.savefiles = ["11t11", "1t1", "3to1", "3to2", "10to4"]
    self.models = [map_1t1, map_1t1, map_3t1, map_3t2, map_10t4]
    # One sampler per model, all sharing the same sample budget.
    self.samplers = [bsam.sampler(model, sample_count)
                     for model in self.models]

    self.input_dim1 = 1
    self.input_dim2 = 2
    self.input_dim3 = 10
    # Sets 1-3: dimension only, no domain attached.
    self.input_sample_set1 = sample_set(self.input_dim1)
    self.input_sample_set2 = sample_set(self.input_dim2)
    self.input_sample_set3 = sample_set(self.input_dim3)
    # Sets 4-6: dimension derived from a domain, with the domain set.
    self.input_sample_set4 = sample_set(self.input_domain1.shape[0])
    self.input_sample_set4.set_domain(self.input_domain1)
    self.input_sample_set5 = sample_set(self.input_domain3.shape[0])
    self.input_sample_set5.set_domain(self.input_domain3)
    self.input_sample_set6 = sample_set(self.input_domain10.shape[0])
    self.input_sample_set6.set_domain(self.input_domain10)
def setUp(self):
    """
    Prepare maps of varying dimension, samplers, and input sample sets
    (three bare, three with [0, 1]^d domains) for the tests.
    """
    # Domains: each row is [lower, upper] = [0, 1] per input dimension.
    self.input_domain1 = np.column_stack((np.zeros((1,)), np.ones((1,))))
    self.input_domain3 = np.column_stack((np.zeros((3,)), np.ones((3,))))
    self.input_domain10 = np.column_stack(
        (np.zeros((10,)), np.ones((10,))))

    # 1-1 map
    def map_1t1(x):
        return np.sin(x)

    # 3-1 map
    def map_3t1(x):
        return np.sum(x, 1)

    # 3-2 map
    def map_3t2(x):
        return np.vstack(([x[:, 0]+x[:, 1], x[:, 2]])).transpose()

    # 10-4 map: pairwise sums plus one four-way sum
    def map_10t4(x):
        x1 = x[:, 0] + x[:, 1]
        x2 = x[:, 2] + x[:, 3]
        x3 = x[:, 4] + x[:, 5]
        x4 = np.sum(x[:, [6, 7, 8, 9]], 1)
        return np.vstack([x1, x2, x3, x4]).transpose()

    num_samples = 100
    self.savefiles = ["11t11", "1t1", "3to1", "3to2", "10to4"]
    self.models = [map_1t1, map_1t1, map_3t1, map_3t2, map_10t4]
    self.samplers = []
    for lb_model in self.models:
        self.samplers.append(bsam.sampler(lb_model, num_samples))

    self.input_dim1 = 1
    self.input_dim2 = 2
    self.input_dim3 = 10
    self.input_sample_set1 = sample_set(self.input_dim1)
    self.input_sample_set2 = sample_set(self.input_dim2)
    self.input_sample_set3 = sample_set(self.input_dim3)
    for index, domain in zip((4, 5, 6), (self.input_domain1,
                                         self.input_domain3,
                                         self.input_domain10)):
        # Dimension comes from the number of domain rows.
        new_set = sample_set(domain.shape[0])
        new_set.set_domain(domain)
        setattr(self, "input_sample_set%d" % index, new_set)
def setUp(self):
    """
    Construct a 1-D discretization of ``linear_model3`` on [0, 1] with
    error estimates and Jacobians attached.
    """
    reference_param = np.array([0.5])
    reference_output = linear_model3(reference_param)
    my_sampler = bsam.sampler(linear_model3)
    in_set = sample.sample_set(1)
    in_set.set_domain(np.repeat([[0.0, 1.0]], 1, axis=0))
    in_set = my_sampler.random_sample_set(
        'random', in_set, num_samples=1E2)
    disc = my_sampler.compute_QoI_and_create_discretization(
        in_set, globalize=True)
    simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
        data_set=disc, Q_ref=reference_output, rect_scale=0.5)
    num = disc.check_nums()
    disc._output_sample_set.set_error_estimates(0.01 * np.ones((num, 1)))
    # Constant 1x1 Jacobian replicated across every sample.
    jac = np.zeros((num, 1, 1))
    jac[:, :, :] = np.array([[0.506]]).transpose()
    disc._input_sample_set.set_jacobians(jac)
    self.disc = disc
def setUp(self):
    """
    Set up a one-dimensional discretization of ``linear_model3``,
    including output error estimates and input Jacobians.
    """
    true_param = np.array([0.5])
    true_Q = linear_model3(true_param)
    lm_sampler = bsam.sampler(linear_model3)
    inputs = sample.sample_set(1)
    inputs.set_domain(np.repeat([[0.0, 1.0]], 1, axis=0))
    inputs = lm_sampler.random_sample_set('random', inputs,
                                          num_samples=1E2)
    discretization = lm_sampler.compute_QoI_and_create_discretization(
        inputs, globalize=True)
    simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
        data_set=discretization, Q_ref=true_Q, rect_scale=0.5)
    count = discretization.check_nums()
    discretization._output_sample_set.set_error_estimates(
        0.01 * np.ones((count, 1)))
    # The model is linear, so every sample shares the same Jacobian.
    jacobians = np.zeros((count, 1, 1))
    jacobians[:, :, :] = np.array([[0.506]]).transpose()
    discretization._input_sample_set.set_jacobians(jacobians)
    self.disc = discretization
def setUp(self):
    """
    Assemble a 3-input / 2-output discretization of ``linear_model1``
    and expose it through a piecewise polynomial surrogate.
    """
    true_param = np.array([0.5, 0.5, 0.5])
    true_Q = linear_model1(true_param)
    lm_sampler = bsam.sampler(linear_model1)
    inputs = sample.sample_set(3)
    inputs.set_domain(np.repeat([[0.0, 1.0]], 3, axis=0))
    inputs = lm_sampler.random_sample_set('random', inputs,
                                          num_samples=1E2)
    discretization = lm_sampler.compute_QoI_and_create_discretization(
        inputs, globalize=True)
    simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
        data_set=discretization, Q_ref=true_Q, rect_scale=0.5)
    count = discretization.check_nums()
    discretization._output_sample_set.set_error_estimates(
        0.01 * np.ones((count, 2)))
    # Every sample gets the same 2x3 Jacobian (the map is linear).
    jacobians = np.zeros((count, 2, 3))
    jacobians[:, :, :] = np.array([[0.506, 0.463], [0.253, 0.918],
                                   [0.085, 0.496]]).transpose()
    discretization._input_sample_set.set_jacobians(jacobians)
    self.sur = surrogates.piecewise_polynomial_surrogate(discretization)
def setUp(self):
    """
    Create test maps of several input/output dimensions (1-1, 3-1, 3-2,
    10-4) and one :class:`bsam.sampler` per map.
    """
    # create 1-1 map
    self.param_min1 = np.zeros((1, ))
    # Upper bound of the 1-D parameter domain.  Was ``np.zeros``,
    # which made the domain degenerate ([0, 0]) and inconsistent with
    # the 3-D and 10-D cases below, which all use [0, 1].
    self.param_max1 = np.ones((1, ))

    def map_1t1(x):
        return np.sin(x)
    # create 3-1 map
    self.param_min3 = np.zeros((3, ))
    self.param_max3 = np.ones((3, ))

    def map_3t1(x):
        return np.sum(x, 1)
    # create 3-2 map

    def map_3t2(x):
        return np.vstack(([x[:, 0] + x[:, 1], x[:, 2]])).transpose()
    # create 10-4 map
    self.param_min10 = np.zeros((10, ))
    self.param_max10 = np.ones((10, ))

    def map_10t4(x):
        x1 = x[:, 0] + x[:, 1]
        x2 = x[:, 2] + x[:, 3]
        x3 = x[:, 4] + x[:, 5]
        x4 = np.sum(x[:, [6, 7, 8, 9]], 1)
        return np.vstack([x1, x2, x3, x4]).transpose()
    num_samples = 100
    self.savefiles = ["11t11", "1t1", "3to1", "3to2", "10to4"]
    self.models = [map_1t1, map_1t1, map_3t1, map_3t2, map_10t4]
    self.samplers = []
    for model in self.models:
        self.samplers.append(bsam.sampler(model, num_samples))
import numpy as np import bet.calculateP.simpleFunP as simpleFunP import bet.calculateP.calculateP as calculateP import bet.postProcess.plotP as plotP import bet.postProcess.plotDomains as plotD import bet.sample as samp import bet.sampling.basicSampling as bsam from myModel import my_model # Define the sampler that will be used to create the discretization # object, which is the fundamental object used by BET to compute # solutions to the stochastic inverse problem. # The sampler and my_model is the interface of BET to the model, # and it allows BET to create input/output samples of the model. sampler = bsam.sampler(my_model) # Initialize 3-dimensional input parameter sample set object input_samples = samp.sample_set(2) # Set parameter domain input_samples.set_domain(np.repeat([[0.0, 1.0]], 2, axis=0)) ''' Suggested changes for user: Try with and without random sampling. If using random sampling, try num_samples = 1E3 and 1E4. What happens when num_samples = 1E2? Try using 'lhs' instead of 'random' in the random_sample_set.
to the number of KL terms) and the number of samples in this space. """ import numpy as np import bet.calculateP.simpleFunP as simpleFunP import bet.calculateP.calculateP as calculateP import bet.postProcess.plotP as plotP import bet.postProcess.plotDomains as plotD import bet.sample as samp import bet.sampling.basicSampling as bsam from lbModel import lb_model from myModel import my_model from Compute_Save_KL import computeSaveKL # Interface BET to the model. sampler = bsam.sampler(lb_model) # Define the number of KL terms to use to represent permeability field num_KL_terms = 2 # Compute and save the KL expansion -- can comment out after running once computeSaveKL(num_KL_terms) # Initialize input parameter sample set object input_samples = samp.sample_set(num_KL_terms) # Set parameter domain KL_term_min = -3.0 KL_term_max = 3.0 input_samples.set_domain( np.repeat([[KL_term_min, KL_term_max]], num_KL_terms, axis=0)) '''
# -*- coding: utf-8 -*-
# This demonstrates how to use BET in serial to sample a parallel
# external model.  Run by calling "python serial_parallel.py".
import os
import subprocess

import scipy.io as sio

import bet.sampling.basicSampling as bsam


def lb_model(input_data, nprocs=2):
    """
    Run the external parallel model on ``input_data`` via ``mpirun``,
    exchanging data through a MATLAB-format I/O file.

    :param input_data: input sample values handed to the model
    :param int nprocs: number of MPI processes to launch
    :returns: model output read back from the I/O file
    """
    io_file_name = "io_file"
    io_mdat = dict()
    io_mdat['input'] = input_data
    # save the input to file
    sio.savemat(io_file_name, io_mdat)
    # run the model (list form, no shell, so arguments are safe)
    subprocess.call(['mpirun', '-np', str(nprocs), 'python',
                     'parallel_model.py', io_file_name])
    # read the output from file
    io_mdat = sio.loadmat(io_file_name)
    output_data = io_mdat['output']
    return output_data


# Sample the external model through the serial wrapper above.
my_sampler = bsam.sampler(lb_model)
my_discretization = my_sampler.create_random_discretization(
    sample_type='r', input_obj=4, savefile="serial_parallel_example",
    num_samples=100)
import numpy as np  # was missing, but np.array is used below
import bet.calculateP.simpleFunP as simpleFunP
import bet.calculateP.calculateP as calculateP
import bet.calculateP.calculateError as calculateError
import bet.sample as samp
import bet.sampling.basicSampling as bsam
import bet.surrogates as surrogates
from bet.Comm import comm
from lbModel import lb_model_exact, lb_model

# Define the reference parameter
param_ref = np.array([[0.5, 0.5, 0.5]])
(Q_ref, _) = lb_model_exact(param_ref)

# Interface BET to the approximate model and create discretization object.
sampler = bsam.sampler(lb_model, error_estimates=True, jacobians=True)
input_samples = samp.sample_set(3)
input_samples.set_domain(np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]]))
my_disc = sampler.create_random_discretization("random", input_samples,
                                               num_samples=1000)

# Define output probability
rect_domain = np.array([[0.5, 1.5], [1.25, 2.25]])
simpleFunP.regular_partition_uniform_distribution_rectangle_domain(
    data_set=my_disc, rect_domain=rect_domain)

# Make emulated input sets
emulated_inputs = bsam.random_sample_set(
    'r', my_disc._input_sample_set._domain, num_samples=10001,
    globalize=False)
import numpy as np import bet.calculateP.simpleFunP as simpleFunP import bet.calculateP.calculateP as calculateP import bet.calculateP.calculateError as calculateError import bet.sample as samp import bet.sampling.basicSampling as bsam import bet.surrogates as surrogates from bet.Comm import comm from lbModel import lb_model_exact, lb_model # Define the reference parameter param_ref = np.array([[0.5, 0.5, 0.5]]) (Q_ref, _) = lb_model_exact(param_ref) # Interface BET to the approximate model and create discretization object. sampler = bsam.sampler(lb_model, error_estimates=True, jacobians=True) input_samples = samp.sample_set(3) input_samples.set_domain(np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])) my_disc = sampler.create_random_discretization("random", input_samples, num_samples=1000) # Define output probability rect_domain = np.array([[0.5, 1.5], [1.25, 2.25]]) simpleFunP.regular_partition_uniform_distribution_rectangle_domain( data_set=my_disc, rect_domain=rect_domain) # Make emulated input sets emulated_inputs = bsam.random_sample_set('r', my_disc._input_sample_set._domain, num_samples=10001,