def test_run_inc_dec(self):
        """
        Run :meth:`bet.sampling.adaptiveSampling.sampler.run_inc_dec` and verify
        that the output has the correct dimensions.
        """
        # sampler.run_inc_dec(increase, decrease, tolerance, rho_D, maximum,
        # input_domain, t_set, savefile, initial_sample_type)
        # returns list where each member is a tuple (discretization,
        # all_step_ratios, num_high_prob_samples,
        # sorted_indices_of_num_high_prob_samples, average_step_ratio)
        inputs = self.test_list[3]
        _, QoI_range, sampler, input_domain, savefile = inputs

        Q_ref = QoI_range * 0.5
        bin_size = 0.15 * QoI_range
        # np.product was removed in NumPy 2.0; np.prod is the supported name.
        maximum = 1 / np.prod(bin_size)

        def ifun(outputs):
            """
            Indicator function: ``maximum`` for rows of ``outputs`` inside
            the box ``Q_ref +/- .5 * bin_size``, 0.0 elsewhere.
            """
            inside = np.logical_and(
                np.all(np.greater_equal(outputs, Q_ref - .5 * bin_size),
                       axis=1),
                np.all(np.less_equal(outputs, Q_ref + .5 * bin_size), axis=1))
            max_values = np.repeat(maximum, outputs.shape[0], 0)
            return inside.astype('float64') * max_values

        # kernel parameters for the increase/decrease heuristic
        increase = [2.0, 3.0, 5.0]
        decrease = [.7, .5, .2]
        tolerance = [1e-3, 1e-4, 1e-7]

        # create t_set
        t_set = asam.transition_set(.5, .5**5, 1.0)

        # run run_inc_dec
        output = sampler.run_inc_dec(increase, decrease, tolerance, ifun,
                                     maximum, input_domain, t_set, savefile)

        results, r_step_size, results_rD, sort_ind, mean_ss = output

        for out in output:
            assert len(out) == 3

        for my_disc in results:
            # check_nums() verifies input/output sample counts agree; the
            # bare attribute access was a bound method, i.e. always truthy.
            assert my_disc.check_nums()
            assert my_disc._input_sample_set.get_dim() == input_domain.shape[0]
            assert my_disc._output_sample_set.get_dim() == len(QoI_range)
        for step_sizes in r_step_size:
            assert step_sizes.shape == (sampler.num_chains,
                                        sampler.chain_length)
        for num_hps in results_rD:
            assert isinstance(num_hps, int)
        for inds in sort_ind:
            assert np.issubdtype(type(inds), np.signedinteger)
        for asr in mean_ss:
            assert asr > t_set.min_ratio
            assert asr < t_set.max_ratio
Example #2
0
    def test_run_inc_dec(self):
        """
        Run :meth:`bet.sampling.adaptiveSampling.sampler.run_inc_dec` and verify
        that the output has the correct dimensions.
        """
        # sampler.run_inc_dec(increase, decrease, tolerance, rho_D, maximum,
        # input_domain, t_set, savefile, initial_sample_type)
        # returns list where each member is a tuple (discretization,
        # all_step_ratios, num_high_prob_samples,
        # sorted_indices_of_num_high_prob_samples, average_step_ratio)
        inputs = self.test_list[3]
        _, QoI_range, sampler, input_domain, savefile = inputs

        Q_ref = QoI_range * 0.5
        bin_size = 0.15 * QoI_range
        # np.product was removed in NumPy 2.0; np.prod is the supported name.
        maximum = 1 / np.prod(bin_size)

        def ifun(outputs):
            """
            Indicator function: ``maximum`` for rows of ``outputs`` inside
            the box ``Q_ref +/- .5 * bin_size``, 0.0 elsewhere.
            """
            inside = np.logical_and(
                np.all(np.greater_equal(outputs, Q_ref - .5 * bin_size),
                       axis=1),
                np.all(np.less_equal(outputs, Q_ref + .5 * bin_size), axis=1))
            max_values = np.repeat(maximum, outputs.shape[0], 0)
            return inside.astype('float64') * max_values

        # kernel parameters for the increase/decrease heuristic
        increase = [2.0, 3.0, 5.0]
        decrease = [.7, .5, .2]
        tolerance = [1e-3, 1e-4, 1e-7]

        # create t_set
        t_set = asam.transition_set(.5, .5**5, 1.0)

        # run run_inc_dec
        output = sampler.run_inc_dec(increase, decrease, tolerance, ifun,
                                     maximum, input_domain, t_set, savefile)

        results, r_step_size, results_rD, sort_ind, mean_ss = output

        for out in output:
            assert len(out) == 3

        for my_disc in results:
            # check_nums() verifies input/output sample counts agree; the
            # bare attribute access was a bound method, i.e. always truthy.
            assert my_disc.check_nums()
            assert my_disc._input_sample_set.get_dim() == input_domain.shape[0]
            assert my_disc._output_sample_set.get_dim() == len(QoI_range)
        for step_sizes in r_step_size:
            assert step_sizes.shape == (sampler.num_chains,
                                        sampler.chain_length)
        for num_hps in results_rD:
            assert isinstance(num_hps, int)
        for inds in sort_ind:
            assert np.issubdtype(type(inds), np.signedinteger)
        for asr in mean_ss:
            assert asr > t_set.min_ratio
            assert asr < t_set.max_ratio
Example #3
0
    def test_run_gen(self):
        """
        Run :meth:`bet.sampling.adaptiveSampling.sampler.run_gen` and verify
        that the output has the correct dimensions.
        """
        # sampler.run_gen(kern_list, rho_D, maximum, param_min, param_max,
        # t_set, savefile, initial_sample_type)
        # returns list where each member is a tuple ((samples, data),
        # all_step_ratios, num_high_prob_samples,
        # sorted_indices_of_num_high_prob_samples, average_step_ratio)
        # create indicator function
        inputs = self.test_list[3]
        _, QoI_range, sampler, param_min, param_max, savefile = inputs

        Q_ref = QoI_range * 0.5
        bin_size = 0.15 * QoI_range
        # np.product was removed in NumPy 2.0; np.prod is the supported name.
        maximum = 1 / np.prod(bin_size)

        def ifun(outputs):
            """
            Indicator function: ``maximum`` for rows of ``outputs`` inside
            the box ``Q_ref +/- .5 * bin_size``, 0.0 elsewhere.
            """
            inside = np.logical_and(
                np.all(np.greater_equal(outputs, Q_ref - .5 * bin_size),
                       axis=1),
                np.all(np.less_equal(outputs, Q_ref + .5 * bin_size), axis=1))
            max_values = np.repeat(maximum, outputs.shape[0], 0)
            return inside.astype('float64') * max_values

        # create rhoD_kernel
        kernel_rD = asam.rhoD_kernel(maximum, ifun)
        kern_list = [kernel_rD] * 2

        # create t_set
        t_set = asam.transition_set(.5, .5**5, 1.0)

        # run run_gen
        output = sampler.run_gen(kern_list, ifun, maximum, param_min,
                                 param_max, t_set, savefile)

        results, r_step_size, results_rD, sort_ind, mean_ss = output

        for out in output:
            assert len(out) == 2

        for samples, data in results:
            assert samples.shape == (sampler.num_samples, len(param_min))
            assert data.shape == (sampler.num_samples, len(QoI_range))
        for step_sizes in r_step_size:
            assert step_sizes.shape == (sampler.num_chains,
                                        sampler.chain_length)
        for num_hps in results_rD:
            # isinstance is the idiomatic type check (type(...) == int fails
            # for subclasses and is flagged by linters)
            assert isinstance(num_hps, int)
        for inds in sort_ind:
            # match the sibling tests: accept any NumPy signed integer type
            assert np.issubdtype(type(inds), np.signedinteger)
        for asr in mean_ss:
            assert asr > t_set.min_ratio
            assert asr < t_set.max_ratio
Example #4
0
    def test_run_gen(self):
        """
        Run :meth:`bet.sampling.adaptiveSampling.sampler.run_gen` and verify
        that the output has the correct dimensions.
        """
        # sampler.run_gen(kern_list, rho_D, maximum, param_min, param_max,
        # t_set, savefile, initial_sample_type)
        # returns list where each member is a tuple ((samples, data),
        # all_step_ratios, num_high_prob_samples,
        # sorted_indices_of_num_high_prob_samples, average_step_ratio)
        # create indicator function
        inputs = self.test_list[3]
        _, QoI_range, sampler, param_min, param_max, savefile = inputs

        Q_ref = QoI_range * 0.5
        bin_size = 0.15 * QoI_range
        # np.product was removed in NumPy 2.0; np.prod is the supported name.
        maximum = 1 / np.prod(bin_size)

        def ifun(outputs):
            """
            Indicator function: ``maximum`` for rows of ``outputs`` inside
            the box ``Q_ref +/- .5 * bin_size``, 0.0 elsewhere.
            """
            inside = np.logical_and(
                np.all(np.greater_equal(outputs, Q_ref - .5 * bin_size),
                       axis=1),
                np.all(np.less_equal(outputs, Q_ref + .5 * bin_size), axis=1))
            max_values = np.repeat(maximum, outputs.shape[0], 0)
            return inside.astype('float64') * max_values

        # create rhoD_kernel
        kernel_rD = asam.rhoD_kernel(maximum, ifun)
        kern_list = [kernel_rD] * 2

        # create t_set
        t_set = asam.transition_set(.5, .5**5, 1.0)

        # run run_gen
        output = sampler.run_gen(kern_list, ifun, maximum, param_min,
                                 param_max, t_set, savefile)

        results, r_step_size, results_rD, sort_ind, mean_ss = output

        for out in output:
            assert len(out) == 2

        for samples, data in results:
            assert samples.shape == (sampler.num_samples, len(param_min))
            assert data.shape == (sampler.num_samples, len(QoI_range))
        for step_sizes in r_step_size:
            assert step_sizes.shape == (sampler.num_chains,
                                        sampler.chain_length)
        for num_hps in results_rD:
            assert isinstance(num_hps, int)
        for inds in sort_ind:
            # match the sibling tests: accept any NumPy signed integer type
            assert np.issubdtype(type(inds), np.signedinteger)
        for asr in mean_ss:
            assert asr > t_set.min_ratio
            assert asr < t_set.max_ratio
Example #5
0
 def setUp(self):
     """
     Set Up

     Build the transition set and an output sample set whose values,
     local arrays, domain, and bounds are initialized for the tests.
     """
     self.t_set = asam.transition_set(.5, .5**5, 1.0)
     self.output_set = sample_set(self.mdim)
     self.output_set.set_values(self.output)
     # NOTE(review): global_to_local appears to populate the set's
     # per-processor local arrays from the global values — confirm.
     self.output_set.global_to_local()
     # Update _right_local, _left_local, _width_local
     self.output_set.set_domain(self.output_domain)
     self.output_set.update_bounds()
     self.output_set.update_bounds_local()
 def setUp(self):
     """
     Set Up

     Build the transition set and an output sample set whose values,
     local arrays, domain, and bounds are initialized for the tests.
     """
     self.t_set = asam.transition_set(.5, .5**5, 1.0)
     self.output_set = sample_set(self.mdim)
     self.output_set.set_values(self.output)
     # NOTE(review): global_to_local appears to populate the set's
     # per-processor local arrays from the global values — confirm.
     self.output_set.global_to_local()
     # Update _right_local, _left_local, _width_local
     self.output_set.set_domain(self.output_domain)
     self.output_set.update_bounds()
     self.output_set.update_bounds_local()
Example #7
0
    def test_generalized_chains(self):
        """
        Test :meth:`bet.sampling.adaptiveSampling.sampler.generalized_chains`
        for three different QoI maps (1 to 1, 3 to 1, 3 to 2, 10 to 4).
        """
        # create a transition set
        t_set = asam.transition_set(.5, .5**5, 1.0)

        # exercise every configured sampler with each initial sampling scheme
        for _, QoI_range, sampler, param_min, param_max, savefile in self.test_list:
            for initial_sample_type in ["random", "r", "lhs"]:
                verify_samples(QoI_range, sampler, param_min, param_max, t_set,
                               savefile, initial_sample_type)
Example #8
0
    def test_generalized_chains(self):
        """
        Test :meth:`bet.sampling.adaptiveSampling.sampler.generalized_chains`
        for three different QoI maps (1 to 1, 3 to 1, 3 to 2, 10 to 4).
        """
        # create a transition set
        t_set = asam.transition_set(.5, .5**5, 1.0) 

        # exercise every configured sampler with each initial sampling scheme
        for _, QoI_range, sampler, param_min, param_max, savefile in self.test_list:
            for initial_sample_type in ["random", "r", "lhs"]:
                verify_samples(QoI_range, sampler, param_min, param_max, t_set,
                        savefile, initial_sample_type)
    def test_generalized_chains(self):
        """
        Test :meth:`bet.sampling.adaptiveSampling.sampler.generalized_chains`
        for three different QoI maps (1 to 1, 3 to 1, 3 to 2, 10 to 4).
        """
        # create a transition set
        t_set = asam.transition_set(.5, .5**5, 1.0)

        for _, QoI_range, sampler, input_domain, savefile in self.test_list:
            for initial_sample_type in ["random", "r", "lhs"]:
                print("Initial sample type: %s" % (initial_sample_type))
                # NOTE(review): hot_start values 0-2 presumably select the
                # restart mode passed through to verify_samples — confirm.
                for hot_start in range(3):
                    verify_samples(QoI_range, sampler, input_domain, t_set,
                                   savefile, initial_sample_type, hot_start)
Example #10
0
# File that generalized_chains / run_* will save adaptive samples to.
sample_save_file = 'sandbox3d'

# Set minima and maxima
param_domain = np.array([[-900, 1500], [.07, .15], [.1, .2]])
lam3 = 0.012
xmin = 1420
xmax = 1580
ymax = 1500
wall_height = -2.5


# Select only the stations I care about this will lead to better sampling
station_nums = [0, 4, 1] # 1, 5, 2

# Create Transition Kernel
transition_set = asam.transition_set(.5, .5**5, 0.5)

# Read in Q_ref and Q to create the appropriate rho_D
mdat = sio.loadmat('../matfiles/Q_3D')
Q = mdat['Q']
# Restrict Q to the chosen stations (columns).
Q = Q[:, station_nums]
Q_ref = mdat['Q_true']
Q_ref = Q_ref[14, station_nums] # 15th/20
# bin_size: per-QoI width of the target box, 15% of the observed range of Q.
bin_ratio = 0.15
bin_size = (np.max(Q, 0)-np.min(Q, 0))*bin_ratio

# Create experiment model
points = mdat['points']
def model(inputs):
    interp_values = np.empty((inputs.shape[0], Q.shape[1])) 
    for i in range(Q.shape[1]):
Example #11
0
# File that generalized_chains / run_* will save adaptive samples to.
sample_save_file = 'sandbox3d'

# Set minima and maxima
param_domain = np.array([[-900, 1500], [.07, .15], [.1, .2]])
lam3 = 0.012
xmin = 1420
xmax = 1580
ymax = 1500
wall_height = -2.5

# Select only the stations I care about this will lead to better sampling
station_nums = [0, 4, 1]  # 1, 5, 2

# Create Transition Kernel
transition_set = asam.transition_set(.5, .5**5, 0.5)

# Read in Q_ref and Q to create the appropriate rho_D
mdat = sio.loadmat('../matfiles/Q_3D')
Q = mdat['Q']
# Restrict Q to the chosen stations (columns).
Q = Q[:, station_nums]
Q_ref = mdat['Q_true']
Q_ref = Q_ref[14, station_nums]  # 15th/20
# bin_size: per-QoI width of the target box, 15% of the observed range of Q.
bin_ratio = 0.15
bin_size = (np.max(Q, 0) - np.min(Q, 0)) * bin_ratio

# Create experiment model
points = mdat['points']


def model(inputs):
Example #12
0
 def setUp(self):
     """
     Set Up

     Create the transition set shared by the tests in this case.
     """
     self.t_set = asam.transition_set(.5, .5**5, 1.0)
Example #13
0
# bin_size: per-QoI width of the target box, 15% of the observed range of Q.
bin_ratio = 0.15
bin_size = (np.max(Q, 0) - np.min(Q, 0)) * bin_ratio

# Create experiment model
points = mdat['points']


def model(inputs):
    """
    Evaluate the experiment model: interpolate each retained QoI column of
    ``Q`` (defined at ``points``) onto the rows of ``inputs`` via griddata.
    """
    interp_values = np.empty((inputs.shape[0], Q.shape[1]))
    # xrange is Python 2 only; this file otherwise targets Python 3
    # (print() calls, range() elsewhere), so use range.
    for i in range(Q.shape[1]):
        interp_values[:, i] = griddata(points.transpose(), Q[:, i], inputs)
    return interp_values


# Create Transition Kernel
transition_set = asam.transition_set(.5, .5**5, 1.0)

# Create kernel: peak density over the target box (1 / box volume).
# np.product was removed in NumPy 2.0; np.prod is the supported name.
maximum = 1 / np.prod(bin_size)


def rho_D(outputs):
    """
    Indicator-style density: ``maximum`` for each row of ``outputs`` lying
    inside the box ``Q_ref +/- .5 * bin_size``, and 0.0 elsewhere.
    """
    lower = Q_ref - .5 * bin_size
    upper = Q_ref + .5 * bin_size
    # A row is inside the box only if every coordinate is within bounds.
    above_lower = np.all(np.greater_equal(outputs, lower), axis=1)
    below_upper = np.all(np.less_equal(outputs, upper), axis=1)
    inside = np.logical_and(above_lower, below_upper)
    max_values = np.repeat(maximum, outputs.shape[0], 0)
    return inside.astype('float64') * max_values

Example #14
0
import bet.sampling.adaptiveSampling as asam
import scipy.io as sio
from scipy.interpolate import griddata

# File that generalized_chains / run_* will save adaptive samples to.
sample_save_file = 'sandbox2d'

# Set minima and maxima
lam_domain = np.array([[.07, .15], [.1, .2]])
param_min = lam_domain[:, 0]
param_max = lam_domain[:, 1]

# Select only the stations I care about this will lead to better sampling
station_nums = [0, 5] # 1, 6

# Create Transition Kernel
transition_set = asam.transition_set(.5, .5**5, 1.0)

# Read in Q_ref and Q to create the appropriate rho_D
mdat = sio.loadmat('Q_2D')
Q = mdat['Q']
# Restrict Q to the chosen stations (columns).
Q = Q[:, station_nums]
Q_ref = mdat['Q_true']
Q_ref = Q_ref[15, station_nums] # 16th/20
# bin_size: per-QoI width of the target box, 15% of the observed range of Q.
bin_ratio = 0.15
bin_size = (np.max(Q, 0)-np.min(Q, 0))*bin_ratio

# Create experiment model
points = mdat['points']
def model(inputs):
    interp_values = np.empty((inputs.shape[0], Q.shape[1])) 
    for i in xrange(Q.shape[1]):
Example #15
0
# Domain geometry constants for the experiment.
ymin = -1050
xmin = 1420
xmax = 1580
ymax = 1500
wall_height = -2.5

param_min = lam_domain[:, 0]
param_max = lam_domain[:, 1]


# Select only the stations I care about this will lead to better sampling
station_nums = [0, 5]  # 1, 6


# Create Transition Kernel
transition_set = asam.transition_set(0.5, 0.5 ** 5, 1.0)

# Read in Q_ref and Q to create the appropriate rho_D
mdat = sio.loadmat("Q_2D")
Q = mdat["Q"]
# Restrict Q to the chosen stations (columns).
Q = Q[:, station_nums]
Q_ref = mdat["Q_true"]
Q_ref = Q_ref[15, station_nums]  # 16th/20
# bin_size: per-QoI width of the target box, 15% of the observed range of Q.
bin_ratio = 0.15
bin_size = (np.max(Q, 0) - np.min(Q, 0)) * bin_ratio

# Create experiment model
points = mdat["points"]


def model(inputs):
Example #16
0
 def setUp(self):
     """
     Set Up

     Create the transition set shared by the tests in this case.
     """
     self.t_set = asam.transition_set(.5, .5**5, 1.0)