Code Example #1
File: test_calculateP.py Project: outsidercherry/BET
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_1to1, self).setUp()
     self.disc._input_sample_set.estimate_volume_mc()
     calcP.prob(self.disc)
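
These test fixtures inherit their discretization from a parent test class, so its construction is not shown. As a rough, self-contained sketch of the setup such a fixture elides, using only the `bet.sample`, `bet.calculateP.calculateP`, and `bet.calculateP.simpleFunP` calls that appear in the examples below (the 1D map, sample count, and reference value are invented for illustration):

import numpy as np
import bet.sample as sample
import bet.calculateP.calculateP as calcP
import bet.calculateP.simpleFunP as sfun

# Hypothetical 1-to-1 problem: uniform input samples through a toy QoI map.
input_set = sample.sample_set(1)
input_set.set_domain(np.array([[0.0, 1.0]]))
input_set.set_values(np.random.uniform(0.0, 1.0, (1000, 1)))

output_set = sample.sample_set(1)
output_set.set_values(2.0 * input_set.get_values())  # toy map Q(lam) = 2*lam

# Simple function approximation around a reference datum, mirroring the
# postprocess() examples below.
q_ref = np.array([1.0])
output_prob_set = sfun.regular_partition_uniform_distribution_rectangle_scaled(
        output_set, q_ref, rect_scale=0.15,
        cells_per_dimension=np.ones((1,)))

disc = sample.discretization(input_set, output_set, output_prob_set)
disc._input_sample_set.estimate_volume_mc()  # MC assumption on cell volumes
calcP.prob(disc)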
Code Example #2
File: Q_3D.py Project: leiyangcq/BET
def postprocess(station_nums, ref_num):
    
    filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
    if len(station_nums) == 3:
        filename += '_q'+str(station_nums[2]+1)
    filename += '_ref_'+str(ref_num+1)

    data = Q[:, station_nums]
    output_sample_set = sample.sample_set(data.shape[1])
    output_sample_set.set_values(data)
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for the simple function approximation and
    # the approximation itself (this can be used to make close comparisons...)
    output_probability_set = sfun.regular_partition_uniform_distribution_rectangle_scaled(\
            output_sample_set, q_ref, rect_scale=0.15,
            cells_per_dimension=np.ones((data.shape[1],)))

    my_disc = sample.discretization(input_sample_set, output_sample_set,
            output_probability_set)

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    input_sample_set.estimate_volume_mc()
    print("Calculating prob")
    calcP.prob(my_disc)
    sample.save_discretization(my_disc, filename, "prob_solution")
Code Example #3
File: Q_3D.py Project: yangleicq/BET
def postprocess(station_nums, ref_num):

    filename = 'P_q' + str(station_nums[0] + 1) + '_q' + str(station_nums[1] +
                                                             1)
    if len(station_nums) == 3:
        filename += '_q' + str(station_nums[2] + 1)
    filename += '_ref_' + str(ref_num + 1)

    data = Q[:, station_nums]
    output_sample_set = sample.sample_set(data.shape[1])
    output_sample_set.set_values(data)
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for the simple function approximation and
    # the approximation itself (this can be used to make close comparisons...)
    output_probability_set = sfun.regular_partition_uniform_distribution_rectangle_scaled(\
            output_sample_set, q_ref, rect_scale=0.15,
            cells_per_dimension=np.ones((data.shape[1],)))

    my_disc = sample.discretization(input_sample_set, output_sample_set,
                                    output_probability_set)

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    input_sample_set.estimate_volume_mc()
    print("Calculating prob")
    calcP.prob(my_disc)
    sample.save_discretization(my_disc, filename, "prob_solution")
Code Example #4
File: test_calculateP.py Project: UT-CHG/BET
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_1to1, self).setUp()
     self.disc._input_sample_set.estimate_volume_mc()
     calcP.prob(self.disc)
Code Example #5
File: test_calculateP.py Project: outsidercherry/BET
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_3to1, self).setUp()
     self.disc._input_sample_set.estimate_volume_mc()
     calcP.prob(self.disc)
     self.P_ref = np.loadtxt(data_path + "/3to1_prob.txt.gz")
Code Example #6
File: test_calculateP.py Project: UT-CHG/BET
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_3to1, self).setUp()
     self.disc._input_sample_set.estimate_volume_mc()
     calcP.prob(self.disc)
     self.P_ref = np.loadtxt(data_path + "/3to1_prob.txt.gz")
Code Example #7
File: Q_2D.py Project: outsidercherry/BET
def postprocess(station_nums, ref_num):

    filename = 'P_q' + str(station_nums[0] + 1) + \
        '_q' + str(station_nums[1] + 1)
    if len(station_nums) == 3:
        filename += '_q' + str(station_nums[2] + 1)
    filename += '_ref_' + str(ref_num + 1)

    data = Q[:, station_nums]
    output_sample_set = sample.sample_set(data.shape[1])
    output_sample_set.set_values(data)
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for the simple function approximation and
    # the approximation itself (this can be used to make close comparisons...)
    output_probability_set = sfun.regular_partition_uniform_distribution_rectangle_scaled(
        output_sample_set,
        q_ref,
        rect_scale=0.15,
        cells_per_dimension=np.ones((data.shape[1], )))

    num_l_emulate = 1e4
    set_emulated = bsam.random_sample_set('r', lam_domain, num_l_emulate)
    my_disc = sample.discretization(input_sample_set,
                                    output_sample_set,
                                    output_probability_set,
                                    emulated_input_sample_set=set_emulated)

    print("Finished emulating lambda samples")

    # Calculate P on lambda emulate
    print("Calculating prob_on_emulated_samples")
    calcP.prob_on_emulated_samples(my_disc)
    sample.save_discretization(my_disc, filename,
                               "prob_on_emulated_samples_solution")

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    input_sample_set.estimate_volume_mc()
    print("Calculating prob")
    calcP.prob(my_disc)
    sample.save_discretization(my_disc, filename, "prob_solution")

    # Calculate P on the actual samples estimating voronoi cell volume with MC
    # integration
    print("Calculating prob_with_emulated_volumes")
    calcP.prob_with_emulated_volumes(my_disc)
    sample.save_discretization(my_disc, filename,
                               "prob_with_emulated_volumes_solution")
Code Example #8
File: Q_3D_serial.py Project: npandachg/BET
def postprocess(station_nums, ref_num):
    
    filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
    if len(station_nums) == 3:
        filename += '_q'+str(station_nums[2]+1)
    filename += '_ref_'+str(ref_num+1)

    data = Q[:, station_nums]
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for the simple function approximation and
    # the approximation itself (this can be used to make close comparisons...)
    (rho_D_M, d_distr_samples, d_Tree) = sfun.uniform_hyperrectangle(data,
            q_ref, bin_ratio=0.15,
            center_pts_per_edge=np.ones((data.shape[1],)))
    mdict = dict()
    mdict['rho_D_M'] = rho_D_M
    mdict['d_distr_samples'] = d_distr_samples

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data, rho_D_M,
            d_distr_samples, d_Tree)
    print("Calculating prob")
    mdict['P1'] = P1
    mdict['lam_vol1'] = lam_vol1
    mdict['lem1'] = samples
    mdict['io_ptr1'] = io_ptr1

    # Export P and compare to MATLAB solution visually
    sio.savemat(filename, mdict, do_compression=True)
Code Example #9
File: Q_2D_parallel.py Project: npandachg/BET
def postprocess(station_nums, ref_num):
    
    filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
    if len(station_nums) == 3:
        filename += '_q'+str(station_nums[2]+1)
    filename += '_ref_'+str(ref_num+1)

    data = Q[:, station_nums]
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for the simple function approximation and
    # the approximation itself (this can be used to make close comparisons...)
    (rho_D_M, d_distr_samples, d_Tree) = sfun.uniform_hyperrectangle(data,
            q_ref, bin_ratio=0.15,
            center_pts_per_edge=np.ones((data.shape[1],)))

    num_l_emulate = 1e6
    lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain, num_l_emulate)
    
    if comm.rank == 0:
        print "Finished emulating lambda samples"
        mdict = dict()
        mdict['rho_D_M'] = rho_D_M
        mdict['d_distr_samples'] = d_distr_samples 
        mdict['num_l_emulate'] = num_l_emulate

    # Calculate P on lambda emulate
    (P0, lem0, io_ptr0, emulate_ptr0) = calcP.prob_emulated(samples, data,
            rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
    if comm.rank == 0:
        print "Calculating prob_emulated"
        mdict['P0'] = P0
        mdict['lem0'] = lem0
        mdict['io_ptr0'] = io_ptr0
        mdict['emulate_ptr0'] = emulate_ptr0

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data,
            rho_D_M, d_distr_samples, d_Tree)
    if comm.rank == 0:
        print "Calculating prob"
        mdict['P1'] = P1
        mdict['lam_vol1'] = lam_vol1
        mdict['lem1'] = samples
        mdict['io_ptr1'] = io_ptr1

    # Calculate P on the actual samples estimating voronoi cell volume with MC
    # integration
    (P3, lam_vol3, lambda_emulate3, io_ptr3, emulate_ptr3) = calcP.prob_mc(samples,
            data, rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
    if comm.rank == 0:
        print "Calculating prob_mc"
        mdict['P3'] = P3
        mdict['lam_vol3'] = lam_vol3
        mdict['io_ptr3'] = io_ptr3
        mdict['emulate_ptr3'] = emulate_ptr3
        # Export P
        sio.savemat(filename, mdict, do_compression=True)
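
Note how this parallel script lets every rank take part in the probability calculations while gating all prints and the final `sio.savemat` on `comm.rank == 0`, so only one process writes output. A minimal sketch of that pattern, assuming the `comm` object comes from BET's `bet.Comm` module (the work function is a hypothetical stand-in):

from bet.Comm import comm  # assumed import path for BET's MPI wrapper

def per_rank_work():
    # Hypothetical stand-in for the per-rank probability calculations.
    return comm.rank

result = per_rank_work()
if comm.rank == 0:
    # Only rank 0 touches stdout / the filesystem, avoiding duplicate output.
    print("rank 0 collected result: %s" % result)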
Code Example #10
File: test_calculateP.py Project: scottw13/BET-1
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_10to4, self).setUp()
     (self.P, self.lam_vol, _) = calcP.prob(samples=self.samples,
             data=self.data, rho_D_M=self.d_distr_prob,
             d_distr_samples=self.d_distr_samples, d_Tree=self.d_Tree)
Code Example #11
File: test_calculateP.py Project: scottw13/BET-1
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_3to1, self).setUp()
     (self.P, self.lam_vol, _) = calcP.prob(samples=self.samples,
             data=self.data, rho_D_M=self.d_distr_prob,
             d_distr_samples=self.d_distr_samples, d_Tree=self.d_Tree)
     self.P_ref = np.loadtxt(data_path + "/3to1_prob.txt.gz")
Code Example #12
File: Q_1D_serial.py Project: willnewton519/BET
def postprocess(station_nums, ref_num):
    
    filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
    if len(station_nums) == 3:
        filename += '_q'+str(station_nums[2]+1)
    filename += '_ref_'+str(ref_num+1)

    data = Q[:, station_nums]
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for the simple function approximation and
    # the approximation itself (this can be used to make close comparisons...)
    (rho_D_M, d_distr_samples, d_Tree) = sfun.uniform_hyperrectangle(data,
            q_ref, bin_ratio=0.15,
            center_pts_per_edge=np.ones((data.shape[1],)))

    num_l_emulate = 1e6
    lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain, num_l_emulate)
    print "Finished emulating lambda samples"

    mdict = dict()
    mdict['rho_D_M'] = rho_D_M
    mdict['d_distr_samples'] = d_distr_samples 
    mdict['num_l_emulate'] = num_l_emulate
    mdict['lambda_emulate'] = lambda_emulate

    # Calculate P on lambda emulate
    (P0, lem0, io_ptr0, emulate_ptr0) = calcP.prob_emulated(samples, data,
            rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
    print "Calculating prob_emulated"
    mdict['P0'] = P0
    mdict['lem0'] = lem0
    mdict['io_ptr0'] = io_ptr0
    mdict['emulate_ptr0'] = emulate_ptr0

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data,
            rho_D_M, d_distr_samples, d_Tree)
    print("Calculating prob")
    mdict['P1'] = P1
    mdict['lam_vol1'] = lam_vol1
    mdict['lem1'] = samples
    mdict['io_ptr1'] = io_ptr1

    # Calculate P on the actual samples estimating voronoi cell volume with MC
    # integration
    (P3, lam_vol3, lambda_emulate3, io_ptr3, emulate_ptr3) = calcP.prob_mc(samples,
            data, rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
    print "Calculating prob_mc"
    mdict['P3'] = P3
    mdict['lam_vol3'] = lam_vol3
    mdict['io_ptr3'] = io_ptr3
    mdict['emulate_ptr3'] = emulate_ptr3
    # Export P
    sio.savemat(filename, mdict, do_compression=True)
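
In this older tuple-returning API, `P1` holds the probability assigned to each sample and `lam_vol1` the assumed Voronoi cell volumes. A quick sanity check after `calcP.prob`, sketched under the assumption that the volumes are normalized so the MC assumption gives each of the N samples volume 1/N:

# P1 should be a discrete probability over the samples.
assert np.isclose(np.sum(P1), 1.0)
# Under the MC assumption every Voronoi cell gets equal (normalized) volume.
assert np.allclose(lam_vol1, 1.0 / samples.shape[0])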
Code Example #13
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_1to1, self).setUp()
     (self.P, self.lam_vol,
      _) = calcP.prob(samples=self.samples,
                      data=self.data,
                      rho_D_M=self.d_distr_prob,
                      d_distr_samples=self.d_distr_samples,
                      d_Tree=self.d_Tree)
Code Example #14
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_3to1, self).setUp()
     (self.P, self.lam_vol,
      _) = calcP.prob(samples=self.samples,
                      data=self.data,
                      rho_D_M=self.d_distr_prob,
                      d_distr_samples=self.d_distr_samples,
                      d_Tree=self.d_Tree)
     self.P_ref = np.loadtxt(data_path + "/3to1_prob.txt.gz")
Code Example #15
def postprocess(station_nums, ref_num):

    filename = 'P_q' + str(station_nums[0] + 1) + '_q' + str(station_nums[1] +
                                                             1)
    if len(station_nums) == 3:
        filename += '_q' + str(station_nums[2] + 1)
    filename += '_ref_' + str(ref_num + 1)

    data = Q[:, station_nums]
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for the simple function approximation and
    # the approximation itself (this can be used to make close comparisons...)
    (rho_D_M, d_distr_samples,
     d_Tree) = sfun.uniform_hyperrectangle(data,
                                           q_ref,
                                           bin_ratio=0.15,
                                           center_pts_per_edge=np.ones(
                                               (data.shape[1], )))
    mdict = dict()
    mdict['rho_D_M'] = rho_D_M
    mdict['d_distr_samples'] = d_distr_samples

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data, rho_D_M,
                                         d_distr_samples, d_Tree)
    print("Calculating prob")
    mdict['P1'] = P1
    mdict['lam_vol1'] = lam_vol1
    mdict['lem1'] = samples
    mdict['io_ptr1'] = io_ptr1

    # Export P and compare to MATLAB solution visually
    sio.savemat(filename, mdict, do_compression=True)
Code Example #16
File: linearMap.py Project: yangleicq/BET
Partition_discretization = sampler.create_random_discretization('random',
                                                            Partition_set,
                                                            num_samples=num_samples_discretize_D)

Monte_Carlo_discretization = sampler.create_random_discretization('random',
                                                            Monte_Carlo_set,
                                                            num_samples=num_iid_samples)

# Compute the simple function approximation to the distribution on the data space
simpleFunP.user_partition_user_distribution(my_discretization,
                                            Partition_discretization,
                                            Monte_Carlo_discretization)

# Calculate probabilities
calculateP.prob(my_discretization)

########################################
# Post-process the results (optional)
########################################
# Show some plots of the different sample sets
plotD.scatter_2D(my_discretization._input_sample_set,
                 filename='Parameter_Samples',
                 file_extension='.eps')
plotD.scatter_2D(my_discretization._output_sample_set,
                 filename='QoI_Samples',
                 file_extension='.eps')
plotD.scatter_2D(my_discretization._output_probability_set,
                 filename='Data_Space_Discretization',
                 file_extension='.eps')
Code Example #17
File: surrogates.py Project: yangleicq/BET
    def calculate_prob_for_sample_set_region(self,
                                             s_set,
                                             regions,
                                             update_input=True):
        """
        Solves stochastic inverse problem based on surrogate points and the
        MC assumption. Calculates the probability of a regions of input space
        and error estimates for those probabilities.

        :param: s_set: sample set for which to calculate error
        :type s_set: :class:`bet.sample.sample_set_base`
        :param region: list of regions of s_set for which to calculate error
        :type region: list
        :param update_input: whether or not to update probabilities and
            errror identifiers for input discretization
        :type update_input: bool

        :rtype: tuple
        :returns: (probabilities, ``error_estimates``), the probability and
            error estimates for the region
        
        """
        if not hasattr(self, 'surrogate_discretization'):
            msg = "surrogate discretization has not been created"
            raise calculateError.wrong_argument_type(msg)
        if not isinstance(s_set, sample.sample_set_base):
            msg = "s_set must be of type bet.sample.sample_set_base"
            raise calculateError.wrong_argument_type(msg)

        # Calculate probability of region
        if self.surrogate_discretization._input_sample_set._volumes_local\
                is None:
            self.surrogate_discretization._input_sample_set.\
                    estimate_volume_mc(globalize=False)
        calculateP.prob(self.surrogate_discretization, globalize=False)
        prob_new_values = calculateP.prob_from_sample_set(\
                self.surrogate_discretization._input_sample_set, s_set)

        # Calculate for each region
        probabilities = []
        error_estimates = []
        for region in regions:
            marker = np.equal(s_set._region, region)
            probability = np.sum(prob_new_values[marker])

            # Calculate error estimate for region
            model_error = calculateError.model_error(\
                    self.surrogate_discretization)
            error_estimate = model_error.calculate_for_sample_set_region_mc(\
                    s_set, region)
            probabilities.append(probability)
            error_estimates.append(error_estimate)
        # Update input only if 1 region is given
        if update_input:
            num = self.input_disc._input_sample_set.check_num()
            prob = np.zeros((num, ))
            error_id = np.zeros((num, ))
            for i in range(num):
                Itemp = np.equal(self.dummy_disc._emulated_ii_ptr_local, i)
                prob_sum = np.sum(self.surrogate_discretization.\
                        _input_sample_set._probabilities_local[Itemp])
                prob[i] = comm.allreduce(prob_sum, op=MPI.SUM)
                error_id_sum = np.sum(self.surrogate_discretization.\
                        _input_sample_set._error_id_local[Itemp])
                error_id[i] = comm.allreduce(error_id_sum, op=MPI.SUM)
            self.input_disc._input_sample_set.set_probabilities(prob)
            self.input_disc._input_sample_set.set_error_id(error_id)

        return (probabilities, error_estimates)
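
A hedged usage sketch for this method; the surrogate object and region labels are hypothetical, and only the call pattern follows the signature shown above:

# Assuming `surrogate` is an instance of the surrogate class defining this
# method, with surrogate_discretization already built, and that s_set._region
# holds integer region labels:
regions = [0, 1]
(probs, err_ests) = surrogate.calculate_prob_for_sample_set_region(
        s_set, regions, update_input=False)
for r, p, e in zip(regions, probs, err_ests):
    print("region %d: probability %g, error estimate %g" % (r, p, e))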
Code Example #18
File: test_calculateP.py Project: mpresho/BET
        # RESULTS WHERE SAMPLES = LAMBDA_EMULATE
        # samples are on a regular grid
        # result_wtree, result_wsamples, result_emulated_rg
        self.result_emulated_rg = calcP.prob_emulated(self.r_samples, self.r_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.r_samples, self.d_Tree)
        self.result_wtree = self.result_emulated_rg
        self.result_wsamples = self.result_emulated_rg
        self.result_wotree = calcP.prob_emulated(self.r_samples, self.r_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.r_samples)
        self.result_wosamples = calcP.prob_emulated(self.r_samples, self.r_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain)

        self.result_prob_rg = calcP.prob(self.r_samples, self.r_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.d_Tree)
        self.result_mc_rg = calcP.prob_mc(self.r_samples, self.r_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.r_samples, self.d_Tree)

        # samples are iid
        self.result_emulated_iid = calcP.prob_emulated(self.u_samples, self.u_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.u_samples, self.d_Tree)
        self.result_prob_iid = calcP.prob(self.u_samples, self.u_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.d_Tree)
        self.result_mc_iid = calcP.prob_mc(self.u_samples, self.u_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.u_samples, self.d_Tree)
Code Example #19
File: linear_condnum_binratio.py Project: beckym/BET
#QoI_indices = [0, 3, 5, 8, 9]
#QoI_indices = [3, 4, 5, 8, 9]
#QoI_indices = [2, 3, 5, 6, 9]

# Restrict the data to have just QoI_indices
data = data[:, QoI_indices]
Q_ref = Q[QoI_indices, :].dot(0.5 * np.ones(Lambda_dim))
# bin_ratio defines the uncertainty in our data
bin_ratio = 0.25

# Find the simple function approximation
(d_distr_prob, d_distr_samples, d_Tree) = simpleFunP.uniform_hyperrectangle(
    data=data, Q_ref=Q_ref, bin_ratio=bin_ratio, center_pts_per_edge=1)

# Calculate probabilities making the Monte Carlo assumption
(P,  lam_vol, io_ptr) = calculateP.prob(samples=samples, data=data,
    rho_D_M=d_distr_prob, d_distr_samples=d_distr_samples)

percentile = 1.0
# Sort samples by highest probability density and find how many samples lie in
# the support of the inverse solution.  With the Monte Carlo assumption, this
# also tells us the approximate volume of this support.
(num_samples, P_high, samples_high, lam_vol_high, data_high) = \
    postTools.sample_highest_prob(top_percentile=percentile, P_samples=P,
    samples=samples, lam_vol=lam_vol, data=data, sort=True)

# Print the number of samples that make up the highest percentile percent
# samples and ratio of the volume of the parameter domain they take up
if comm.rank == 0:
    print(num_samples, np.sum(lam_vol_high))
Code Example #20
File: surrogates.py Project: leiyangcq/BET
    def calculate_prob_for_sample_set_region(self, s_set, 
                                             regions, update_input=True):
        """
        Solves stochastic inverse problem based on surrogate points and the
        MC assumption. Calculates the probability of a regions of input space
        and error estimates for those probabilities.

        :param: s_set: sample set for which to calculate error
        :type s_set: :class:`bet.sample.sample_set_base`
        :param region: list of regions of s_set for which to calculate error
        :type region: list
        :param update_input: whether or not to update probabilities and
            errror identifiers for input discretization
        :type update_input: bool

        :rtype: tuple
        :returns: (probabilities, ``error_estimates``), the probability and
            error estimates for the region
        
        """
        if not hasattr(self, 'surrogate_discretization'):
            msg = "surrogate discretization has not been created"
            raise calculateError.wrong_argument_type(msg)
        if not isinstance(s_set, sample.sample_set_base):
            msg = "s_set must be of type bet.sample.sample_set_base"
            raise calculateError.wrong_argument_type(msg)
            
        # Calculate probability of region 
        if self.surrogate_discretization._input_sample_set._volumes_local\
                is None:
            self.surrogate_discretization._input_sample_set.\
                    estimate_volume_mc(globalize=False)
        calculateP.prob(self.surrogate_discretization, globalize=False)
        prob_new_values = calculateP.prob_from_sample_set(\
                self.surrogate_discretization._input_sample_set, s_set)
        
        # Calculate for each region
        probabilities = []
        error_estimates = []
        for region in regions:
            marker = np.equal(s_set._region, region)
            probability = np.sum(prob_new_values[marker])

            # Calculate error estimate for region
            model_error = calculateError.model_error(\
                    self.surrogate_discretization)
            error_estimate = model_error.calculate_for_sample_set_region_mc(\
                    s_set, region)
            probabilities.append(probability)
            error_estimates.append(error_estimate)
        # Update input only if 1 region is given
        if update_input:
            num = self.input_disc._input_sample_set.check_num()
            prob = np.zeros((num,))
            error_id = np.zeros((num,))
            for i in range(num):
                Itemp = np.equal(self.dummy_disc._emulated_ii_ptr_local, i)
                prob_sum = np.sum(self.surrogate_discretization.\
                        _input_sample_set._probabilities_local[Itemp])
                prob[i] = comm.allreduce(prob_sum, op=MPI.SUM)
                error_id_sum = np.sum(self.surrogate_discretization.\
                        _input_sample_set._error_id_local[Itemp])
                error_id[i] = comm.allreduce(error_id_sum, op=MPI.SUM)
            self.input_disc._input_sample_set.set_probabilities(prob)
            self.input_disc._input_sample_set.set_error_id(error_id)
                    
        return (probabilities, error_estimates)
Code Example #21

# Restrict the data to have just QoI_indices
data = data[:, QoI_indices]
Q_ref = Q[QoI_indices, :].dot(0.5 * np.ones(Lambda_dim))

# bin_ratio defines the uncertainty in our data
bin_ratio = 0.25

# Find the simple function approximation
(d_distr_prob, d_distr_samples, d_Tree) = simpleFunP.uniform_hyperrectangle(
    data=data, Q_ref=Q_ref, bin_ratio=bin_ratio, center_pts_per_edge=1)

# Calculate probabilities making the Monte Carlo assumption
(P, lam_vol, io_ptr) = calculateP.prob(samples=samples,
                                       data=data,
                                       rho_D_M=d_distr_prob,
                                       d_distr_samples=d_distr_samples)

percentile = 1.0
# Sort samples by highest probability density and find how many samples lie in
# the support of the inverse solution.  With the Monte Carlo assumption, this
# also tells us the approximate volume of this support.
(num_samples, P_high, samples_high, lam_vol_high, data_high) = \
    postTools.sample_highest_prob(top_percentile=percentile, P_samples=P,
    samples=samples, lam_vol=lam_vol, data=data, sort=True)

# Print the number of samples that make up the highest percentile percent
# samples and ratio of the volume of the parameter domain they take up
if comm.rank == 0:
    print(num_samples, np.sum(lam_vol_high))