def test_run_tk(self):
    """
    Run :meth:`bet.sampling.adaptiveSampling.sampler.run_tk` and verify
    that the output has the correct dimensions.
    """
    # sampler.run_tk(init_ratio, min_ratio, max_ratio, rho_D, maximum,
    #   input_domain, kernel, savefile, initial_sample_type)
    # returns a list where each member is a tuple (discretization,
    #   all_step_ratios, num_high_prob_samples,
    #   sorted_indices_of_num_high_prob_samples, average_step_ratio)
    inputs = self.test_list[3]
    _, QoI_range, sampler, input_domain, savefile = inputs

    Q_ref = QoI_range * 0.5
    bin_size = 0.15 * QoI_range
    maximum = 1 / np.prod(bin_size)

    def ifun(outputs):
        """
        Indicator function
        """
        inside = np.logical_and(
            np.all(np.greater_equal(outputs, Q_ref - .5 * bin_size),
                   axis=1),
            np.all(np.less_equal(outputs, Q_ref + .5 * bin_size),
                   axis=1))
        max_values = np.repeat(maximum, outputs.shape[0], 0)
        return inside.astype('float64') * max_values

    # create rhoD_kernel
    kernel_rD = asam.rhoD_kernel(maximum, ifun)

    # create lists of initial, minimum, and maximum step-size ratios
    init_ratio = [1.0, .5, .25]
    min_ratio = [.5**2, .5**5, .5**7]
    max_ratio = [1.0, .75, .5]

    # run run_tk
    output = sampler.run_tk(init_ratio, min_ratio, max_ratio, ifun,
                            maximum, input_domain, kernel_rD, savefile)
    results, r_step_size, results_rD, sort_ind, mean_ss = output

    for out in output:
        assert len(out) == 3

    for my_disc in results:
        assert my_disc.check_nums()
        assert my_disc._input_sample_set.get_dim() == input_domain.shape[0]
        assert my_disc._output_sample_set.get_dim() == len(QoI_range)
    for step_sizes in r_step_size:
        assert step_sizes.shape == (sampler.num_chains,
                                    sampler.chain_length)
    for num_hps in results_rD:
        assert isinstance(num_hps, int)
    for inds in sort_ind:
        assert np.issubdtype(type(inds), np.signedinteger)
    for asr, mir, mar in zip(mean_ss, min_ratio, max_ratio):
        assert asr > mir
        assert asr < mar
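# A minimal standalone sketch (not part of the test suite) of what the
# ``ifun`` indicator above computes: rows of ``outputs`` that fall inside
# the box [Q_ref - bin_size/2, Q_ref + bin_size/2] get the density value
# ``maximum``; all other rows get 0. Toy values assumed for illustration.
import numpy as np

Q_ref = np.array([0.5, 0.5])
bin_size = np.array([0.15, 0.15])
maximum = 1 / np.prod(bin_size)          # 1 / 0.0225 = 44.44...
outputs = np.array([[0.50, 0.50],        # inside the box
                    [0.90, 0.10]])       # outside the box
inside = np.logical_and(
    np.all(outputs >= Q_ref - .5 * bin_size, axis=1),
    np.all(outputs <= Q_ref + .5 * bin_size, axis=1))
print(inside.astype('float64') * maximum)  # [44.44...  0.]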
def test_run_gen(self):
    """
    Run :meth:`bet.sampling.adaptiveSampling.sampler.run_gen` and verify
    that the output has the correct dimensions.
    """
    # sampler.run_gen(kern_list, rho_D, maximum, param_min, param_max,
    #   t_set, savefile, initial_sample_type)
    # returns a list where each member is a tuple ((samples, data),
    #   all_step_ratios, num_high_prob_samples,
    #   sorted_indices_of_num_high_prob_samples, average_step_ratio)
    # create indicator function
    inputs = self.test_list[3]
    _, QoI_range, sampler, param_min, param_max, savefile = inputs

    Q_ref = QoI_range * 0.5
    bin_size = 0.15 * QoI_range
    maximum = 1 / np.prod(bin_size)

    def ifun(outputs):
        """
        Indicator function
        """
        inside = np.logical_and(
            np.all(np.greater_equal(outputs, Q_ref - .5 * bin_size),
                   axis=1),
            np.all(np.less_equal(outputs, Q_ref + .5 * bin_size),
                   axis=1))
        max_values = np.repeat(maximum, outputs.shape[0], 0)
        return inside.astype('float64') * max_values

    # create rhoD_kernel
    kernel_rD = asam.rhoD_kernel(maximum, ifun)
    kern_list = [kernel_rD] * 2

    # create t_set
    t_set = asam.transition_set(.5, .5**5, 1.0)

    # run run_gen
    output = sampler.run_gen(kern_list, ifun, maximum, param_min,
                             param_max, t_set, savefile)
    results, r_step_size, results_rD, sort_ind, mean_ss = output

    for out in output:
        assert len(out) == 2

    for samples, data in results:
        assert samples.shape == (sampler.num_samples, len(param_min))
        assert data.shape == (sampler.num_samples, len(QoI_range))
    for step_sizes in r_step_size:
        assert step_sizes.shape == (sampler.num_chains,
                                    sampler.chain_length)
    for num_hps in results_rD:
        assert isinstance(num_hps, int)
    for inds in sort_ind:
        assert np.issubdtype(type(inds), np.signedinteger)
    for asr in mean_ss:
        assert asr > t_set.min_ratio
        assert asr < t_set.max_ratio
def setUp(self):
    """
    Set up
    """
    self.kernel = asam.rhoD_kernel(self.maximum, self.rho_D)
def verify_samples(QoI_range, sampler, input_domain, t_set, savefile,
                   initial_sample_type, hot_start=0):
    """
    Run :meth:`bet.sampling.adaptiveSampling.sampler.generalized_chains`
    and verify that the samples have the correct dimensions and are
    contained in the bounded parameter space.
    """
    # create indicator function
    Q_ref = QoI_range * 0.5
    bin_size = 0.15 * QoI_range
    maximum = 1 / np.prod(bin_size)

    def ifun(outputs):
        """
        Indicator function
        """
        left = np.repeat([Q_ref - .5 * bin_size], outputs.shape[0], 0)
        right = np.repeat([Q_ref + .5 * bin_size], outputs.shape[0], 0)
        left = np.all(np.greater_equal(outputs, left), axis=1)
        right = np.all(np.less_equal(outputs, right), axis=1)
        inside = np.logical_and(left, right)
        max_values = np.repeat(maximum, outputs.shape[0], 0)
        return inside.astype('float64') * max_values

    # create rhoD_kernel
    kernel_rD = asam.rhoD_kernel(maximum, ifun)

    if comm.rank == 0:
        print("dim", input_domain.shape)

    if not hot_start:
        # run generalized chains
        (my_discretization, all_step_ratios) = sampler.generalized_chains(
            input_domain, t_set, kernel_rD, savefile, initial_sample_type)
        print("COLD", comm.rank)
    else:
        # cold start
        sampler1 = asam.sampler(sampler.num_samples // 2,
                                sampler.chain_length // 2, sampler.lb_model)
        (my_discretization, all_step_ratios) = sampler1.generalized_chains(
            input_domain, t_set, kernel_rD, savefile, initial_sample_type)
        print("COLD then", comm.rank)
        comm.barrier()
        # hot start
        (my_discretization, all_step_ratios) = sampler.generalized_chains(
            input_domain, t_set, kernel_rD, savefile, initial_sample_type,
            hot_start=hot_start)
        print("HOT", comm.rank)
    comm.barrier()

    # check dimensions of input and output
    assert my_discretization.check_nums()

    # are the inputs in bounds?
    input_left = np.repeat([input_domain[:, 0]], sampler.num_samples, 0)
    input_right = np.repeat([input_domain[:, 1]], sampler.num_samples, 0)
    assert np.all(
        my_discretization._input_sample_set.get_values() <= input_right)
    assert np.all(
        my_discretization._input_sample_set.get_values() >= input_left)

    # check dimensions of output
    assert my_discretization._output_sample_set.get_dim() == len(QoI_range)

    # check dimensions of all_step_ratios
    assert all_step_ratios.shape == (sampler.num_chains,
                                     sampler.chain_length)

    # are all the step ratios of an appropriate size?
    assert np.all(all_step_ratios >= t_set.min_ratio)
    assert np.all(all_step_ratios <= t_set.max_ratio)

    # did the savefiles get created? (proper number, contain proper keys)
    comm.barrier()
    mdat = dict()
    # if comm.rank == 0:
    mdat = sio.loadmat(savefile)
    saved_disc = bet.sample.load_discretization(savefile)
    saved_disc.local_to_global()

    # compare the input
    nptest.assert_array_equal(
        my_discretization._input_sample_set.get_values(),
        saved_disc._input_sample_set.get_values())
    # compare the output
    nptest.assert_array_equal(
        my_discretization._output_sample_set.get_values(),
        saved_disc._output_sample_set.get_values())

    nptest.assert_array_equal(all_step_ratios, mdat['step_ratios'])
    assert sampler.chain_length == mdat['chain_length']
    assert sampler.num_samples == mdat['num_samples']
    assert sampler.num_chains == mdat['num_chains']
    nptest.assert_array_equal(sampler.sample_batch_no,
                              np.squeeze(mdat['sample_batch_no']))
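# An equivalent, shorter form of the bound check used above: numpy
# broadcasting compares an (n, dim) sample array against the (dim,)
# bound vectors directly, so the np.repeat tiling is not required.
# A self-contained sketch with toy values:
import numpy as np

input_domain = np.array([[0.0, 1.0], [0.0, 2.0]])   # (dim, 2) bounds
values = np.array([[0.3, 1.5], [0.9, 0.1]])         # (n, dim) samples
assert np.all(values >= input_domain[:, 0])          # lower bounds
assert np.all(values <= input_domain[:, 1])          # upper bounds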
# Create kernel
maximum = 1 / np.prod(bin_size)


def rho_D(outputs):
    rho_left = np.repeat([Q_ref - .5 * bin_size], outputs.shape[0], 0)
    rho_right = np.repeat([Q_ref + .5 * bin_size], outputs.shape[0], 0)
    rho_left = np.all(np.greater_equal(outputs, rho_left), axis=1)
    rho_right = np.all(np.less_equal(outputs, rho_right), axis=1)
    inside = np.logical_and(rho_left, rho_right)
    max_values = np.repeat(maximum, outputs.shape[0], 0)
    return inside.astype('float64') * max_values


kernel_mm = asam.maxima_mean_kernel(np.array([Q_ref]), rho_D)
kernel_rD = asam.rhoD_kernel(maximum, rho_D)
kernel_m = asam.maxima_kernel(np.array([Q_ref]), rho_D)
heur_list = [kernel_mm, kernel_rD, kernel_m]

# Create sampler
chain_length = 125
num_chains = 80
num_samples = num_chains * chain_length
sampler = asam.sampler(num_samples, chain_length, model)
initial_sample_type = "lhs"

# Get samples
# Run with varying kernels
gen_results = sampler.run_gen(heur_list, rho_D, maximum, param_domain,
                              transition_set, sample_save_file)
# run_reseed_results = sampler.run_gen(heur_list, rho_D, maximum, param_domain,
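# For reference, a minimal sketch of how the ``transition_set`` passed to
# run_gen above is built elsewhere in this example; based on the calls in
# the tests above, the positional arguments are assumed to be the initial,
# minimum, and maximum step-size ratios.
transition_set = asam.transition_set(0.5,     # init_ratio: starting step size
                                     0.5**5,  # min_ratio: smallest allowed step
                                     1.0)     # max_ratio: largest allowed step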
def verify_samples(QoI_range, sampler, param_min, param_max, t_set, savefile,
                   initial_sample_type, hot_start=0):
    """
    Run :meth:`bet.sampling.adaptiveSampling.sampler.generalized_chains`
    and verify that the samples have the correct dimensions and are
    contained in the bounded parameter space.
    """
    # create indicator function
    Q_ref = QoI_range * 0.5
    bin_size = 0.15 * QoI_range
    maximum = 1 / np.prod(bin_size)

    def ifun(outputs):
        """
        Indicator function
        """
        left = np.repeat([Q_ref - .5 * bin_size], outputs.shape[0], 0)
        right = np.repeat([Q_ref + .5 * bin_size], outputs.shape[0], 0)
        left = np.all(np.greater_equal(outputs, left), axis=1)
        right = np.all(np.less_equal(outputs, right), axis=1)
        inside = np.logical_and(left, right)
        max_values = np.repeat(maximum, outputs.shape[0], 0)
        return inside.astype('float64') * max_values

    # create rhoD_kernel
    kernel_rD = asam.rhoD_kernel(maximum, ifun)

    if not hot_start:
        # run generalized chains
        (samples, data, all_step_ratios) = sampler.generalized_chains(
            param_min, param_max, t_set, kernel_rD, savefile,
            initial_sample_type)
    else:
        # cold start
        sampler1 = asam.sampler(sampler.num_samples // 2,
                                sampler.chain_length // 2, sampler.lb_model)
        (samples, data, all_step_ratios) = sampler1.generalized_chains(
            param_min, param_max, t_set, kernel_rD, savefile,
            initial_sample_type)
        # hot start
        (samples, data, all_step_ratios) = sampler.generalized_chains(
            param_min, param_max, t_set, kernel_rD, savefile,
            initial_sample_type, hot_start=hot_start)

    # check dimensions of samples
    assert samples.shape == (sampler.num_samples, len(param_min))

    # are the samples in bounds?
    param_left = np.repeat([param_min], sampler.num_samples, 0)
    param_right = np.repeat([param_max], sampler.num_samples, 0)
    assert np.all(samples <= param_right)
    assert np.all(samples >= param_left)

    # check dimensions of data
    assert data.shape == (sampler.num_samples, len(QoI_range))

    # check dimensions of all_step_ratios
    assert all_step_ratios.shape == (sampler.num_chains,
                                     sampler.chain_length)

    # are all the step ratios of an appropriate size?
    assert np.all(all_step_ratios >= t_set.min_ratio)
    assert np.all(all_step_ratios <= t_set.max_ratio)

    # did the savefiles get created? (proper number, contain proper keys)
    mdat = {}
    if comm.rank == 0:
        mdat = sio.loadmat(savefile)
        nptest.assert_array_equal(samples, mdat['samples'])
        nptest.assert_array_equal(data, mdat['data'])
        nptest.assert_array_equal(all_step_ratios, mdat['step_ratios'])
        assert sampler.chain_length == mdat['chain_length']
        assert sampler.num_samples == mdat['num_samples']
        assert sampler.num_chains == mdat['num_chains']
        nptest.assert_array_equal(sampler.sample_batch_no,
                                  np.squeeze(mdat['sample_batch_no']))
def rho_D(outputs):
    rho_left = np.repeat([Q_ref - .5 * bin_size], outputs.shape[0], 0)
    rho_right = np.repeat([Q_ref + .5 * bin_size], outputs.shape[0], 0)
    rho_left = np.all(np.greater_equal(outputs, rho_left), axis=1)
    rho_right = np.all(np.less_equal(outputs, rho_right), axis=1)
    inside = np.logical_and(rho_left, rho_right)
    max_values = np.repeat(maximum, outputs.shape[0], 0)
    return inside.astype('float64') * max_values


# Create sampler
chain_length = 125
num_chains = 80
num_samples = chain_length * num_chains
sampler = asam.sampler(num_samples, chain_length, model)
print(sampler.num_samples)
print(sampler.chain_length)
print(sampler.num_chains)

# Get samples
initial_sample_type = "lhs"

# Create Transition Kernel
transition_set = asam.transition_set(0.5, .5**5, 0.50)
kernel = asam.rhoD_kernel(maximum, rho_D, 1e-4, 1.5, .1)

(samples, data, all_step_ratios) = sampler.generalized_chains(
    param_min, param_max, transition_set, kernel, sample_save_file,
    initial_sample_type)

bsam.in_high_prob(data, rho_D, maximum)

print(np.mean(all_step_ratios))
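# A rough, self-contained stand-in (hypothetical, not the bet API) for the
# kind of diagnostic bsam.in_high_prob reports above: the fraction of
# sampled outputs that land where rho_D is nonzero, i.e. inside the target
# box. Toy values assumed for illustration.
import numpy as np

data = np.array([[0.50, 0.50], [0.90, 0.10]])  # toy output samples
rho_vals = np.array([44.4, 0.0])               # stand-in for rho_D(data)
frac = np.count_nonzero(rho_vals) / float(data.shape[0])
print("fraction in high-probability region:", frac)  # 0.5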