Example #1
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_1to1, self).setUp()
     self.disc._input_sample_set.estimate_volume_mc()
     calcP.prob(self.disc)
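The setUp snippets on this page assume a discretization built elsewhere in the test fixture. A minimal standalone sketch of the same pattern, assembled from the calls used in the other examples here (the 1-to-1 identity map, sample count, and reference point are illustrative assumptions, not from the original tests):

import numpy as np
import bet.sample as sample
import bet.calculateP.calculateP as calcP
import bet.calculateP.simpleFunP as sfun

# 1-to-1 problem: uniform samples on [0, 1] mapped through the identity
input_set = sample.sample_set(1)
vals = np.random.uniform(0.0, 1.0, (1000, 1))
input_set.set_values(vals)

output_set = sample.sample_set(1)
output_set.set_values(vals)

# Simple function approximation of the output density around q_ref
q_ref = np.array([0.5])
output_prob_set = sfun.regular_partition_uniform_distribution_rectangle_scaled(
    output_set, q_ref, rect_scale=0.15,
    cells_per_dimension=np.ones((1,)))

disc = sample.discretization(input_set, output_set, output_prob_set)

# MC assumption: every Voronoi cell is assigned volume 1/num_samples
disc._input_sample_set.estimate_volume_mc()
calcP.prob(disc)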
Example #2
def postprocess(station_nums, ref_num):
    
    filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
    if len(station_nums) == 3:
        filename += '_q'+str(station_nums[2]+1)
    filename += '_ref_'+str(ref_num+1)

    data = Q[:, station_nums]
    output_sample_set = sample.sample_set(data.shape[1])
    output_sample_set.set_values(data)
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for simple function approximation and the
    # approximation itself (this can be used to make close comparisons...)
    output_probability_set = sfun.regular_partition_uniform_distribution_rectangle_scaled(\
            output_sample_set, q_ref, rect_scale=0.15,
            cells_per_dimension=np.ones((data.shape[1],)))

    my_disc = sample.discretization(input_sample_set, output_sample_set,
            output_probability_set)

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    input_sample_set.estimate_volume_mc()
    print("Calculating prob")
    calcP.prob(my_disc)
    sample.save_discretization(my_disc, filename, "prob_solution")
Example #3
def postprocess(station_nums, ref_num):

    filename = 'P_q' + str(station_nums[0] + 1) + '_q' + str(station_nums[1] +
                                                             1)
    if len(station_nums) == 3:
        filename += '_q' + str(station_nums[2] + 1)
    filename += '_ref_' + str(ref_num + 1)

    data = Q[:, station_nums]
    output_sample_set = sample.sample_set(data.shape[1])
    output_sample_set.set_values(data)
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for simple function approximation and the
    # approximation itself (this can be used to make close comparisons...)
    output_probability_set = sfun.regular_partition_uniform_distribution_rectangle_scaled(\
            output_sample_set, q_ref, rect_scale=0.15,
            cells_per_dimension=np.ones((data.shape[1],)))

    my_disc = sample.discretization(input_sample_set, output_sample_set,
                                    output_probability_set)

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    input_sample_set.estimate_volume_mc()
    print("Calculating prob")
    calcP.prob(my_disc)
    sample.save_discretization(my_disc, filename, "prob_solution")
Example #4
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_1to1, self).setUp()
     self.disc._input_sample_set.estimate_volume_mc()
     calcP.prob(self.disc)
Example #5
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_3to1, self).setUp()
     self.disc._input_sample_set.estimate_volume_mc()
     calcP.prob(self.disc)
     self.P_ref = np.loadtxt(data_path + "/3to1_prob.txt.gz")
Example #6
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_3to1, self).setUp()
     self.disc._input_sample_set.estimate_volume_mc()
     calcP.prob(self.disc)
     self.P_ref = np.loadtxt(data_path + "/3to1_prob.txt.gz")
Example #7
def postprocess(station_nums, ref_num):

    filename = 'P_q' + str(station_nums[0] + 1) + \
        '_q' + str(station_nums[1] + 1)
    if len(station_nums) == 3:
        filename += '_q' + str(station_nums[2] + 1)
    filename += '_ref_' + str(ref_num + 1)

    data = Q[:, station_nums]
    output_sample_set = sample.sample_set(data.shape[1])
    output_sample_set.set_values(data)
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for simple function approximation and the
    # approximation itself (this can be used to make close comparisons...)
    output_probability_set = sfun.regular_partition_uniform_distribution_rectangle_scaled(
        output_sample_set,
        q_ref,
        rect_scale=0.15,
        cells_per_dimension=np.ones((data.shape[1], )))

    num_l_emulate = int(1e4)
    set_emulated = bsam.random_sample_set('r', lam_domain, num_l_emulate)
    my_disc = sample.discretization(input_sample_set,
                                    output_sample_set,
                                    output_probability_set,
                                    emulated_input_sample_set=set_emulated)

    print("Finished emulating lambda samples")

    # Calculate P on lambda emulate
    print("Calculating prob_on_emulated_samples")
    calcP.prob_on_emulated_samples(my_disc)
    sample.save_discretization(my_disc, filename,
                               "prob_on_emulated_samples_solution")

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    input_sample_set.estimate_volume_mc()
    print("Calculating prob")
    calcP.prob(my_disc)
    sample.save_discretization(my_disc, filename, "prob_solution")

    # Calculate P on the actual samples estimating voronoi cell volume with MC
    # integration
    calcP.prob_with_emulated_volumes(my_disc)
    print("Calculating prob_with_emulated_volumes")
    sample.save_discretization(my_disc, filename,
                               "prob_with_emulated_volumes_solution")
Example #8
def postprocess(station_nums, ref_num):
    
    filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
    if len(station_nums) == 3:
        filename += '_q'+str(station_nums[2]+1)
    filename += '_ref_'+str(ref_num+1)

    data = Q[:, station_nums]
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for simple function approximation and the
    # approximation itself (this can be used to make close comparisons...)
    (rho_D_M, d_distr_samples, d_Tree) = sfun.uniform_hyperrectangle(data,
            q_ref, bin_ratio=0.15,
            center_pts_per_edge=np.ones((data.shape[1],)))
    mdict = dict()
    mdict['rho_D_M'] = rho_D_M
    mdict['d_distr_samples'] = d_distr_samples

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data, rho_D_M,
            d_distr_samples, d_Tree)
    print("Calculating prob")
    mdict['P1'] = P1
    mdict['lam_vol1'] = lam_vol1
    mdict['lem1'] = samples
    mdict['io_ptr1'] = io_ptr1

    # Export P and compare to MATLAB solution visually
    sio.savemat(filename, mdict, do_compression=True)
Example #9
def postprocess(station_nums, ref_num):
    
    filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
    if len(station_nums) == 3:
        filename += '_q'+str(station_nums[2]+1)
    filename += '_ref_'+str(ref_num+1)

    data = Q[:, station_nums]
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for simple function approximation and the
    # approximation itself (this can be used to make close comparisons...)
    (rho_D_M, d_distr_samples, d_Tree) = sfun.uniform_hyperrectangle(data,
            q_ref, bin_ratio=0.15,
            center_pts_per_edge=np.ones((data.shape[1],)))

    num_l_emulate = 1e6
    lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain, num_l_emulate)
    
    if comm.rank == 0:
        print "Finished emulating lambda samples"
        mdict = dict()
        mdict['rho_D_M'] = rho_D_M
        mdict['d_distr_samples'] = d_distr_samples 
        mdict['num_l_emulate'] = num_l_emulate

    # Calculate P on lambda emulate
    (P0, lem0, io_ptr0, emulate_ptr0) = calcP.prob_emulated(samples, data,
            rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
    if comm.rank == 0:
        print "Calculating prob_emulated"
        mdict['P0'] = P0
        mdict['lem0'] = lem0
        mdict['io_ptr0'] = io_ptr0
        mdict['emulate_ptr0'] = emulate_ptr0

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data,
            rho_D_M, d_distr_samples, d_Tree)
    if comm.rank == 0:
        print "Calculating prob"
        mdict['P1'] = P1
        mdict['lam_vol1'] = lam_vol1
        mdict['lem1'] = samples
        mdict['io_ptr1'] = io_ptr1

    # Calculate P on the actual samples estimating voronoi cell volume with MC
    # integration
    (P3, lam_vol3, lambda_emulate3, io_ptr3, emulate_ptr3) = calcP.prob_mc(samples,
            data, rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
    if comm.rank == 0:
        print "Calculating prob_mc"
        mdict['P3'] = P3
        mdict['lam_vol3'] = lam_vol3
        mdict['io_ptr3'] = io_ptr3
        mdict['emulate_ptr3'] = emulate_ptr3
        # Export P
        sio.savemat(filename, mdict, do_compression=True)
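The exported file can be checked directly with scipy.io; savemat appends the .mat extension, and the keys match the mdict built above. A small sketch (the sum-to-one check is a basic sanity test, not part of the original script):

import numpy as np
import scipy.io as sio

results = sio.loadmat(filename + '.mat')
for key in ('P0', 'P1', 'P3'):
    # each probability vector should sum to approximately 1
    print(key, results[key].ravel().sum())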
Example #10
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_10to4, self).setUp()
     (self.P, self.lam_vol, _) = calcP.prob(samples=self.samples,
             data=self.data, rho_D_M=self.d_distr_prob,
             d_distr_samples=self.d_distr_samples, d_Tree=self.d_Tree)
Example #11
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_3to1, self).setUp()
     (self.P, self.lam_vol, _) = calcP.prob(samples=self.samples,
             data=self.data, rho_D_M=self.d_distr_prob,
             d_distr_samples=self.d_distr_samples, d_Tree=self.d_Tree)
     self.P_ref = np.loadtxt(data_path + "/3to1_prob.txt.gz")
Example #12
def postprocess(station_nums, ref_num):
    
    filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
    if len(station_nums) == 3:
        filename += '_q'+str(station_nums[2]+1)
    filename += '_ref_'+str(ref_num+1)

    data = Q[:, station_nums]
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for simple function approximation and the
    # approximation itself (this can be used to make close comparisons...)
    (rho_D_M, d_distr_samples, d_Tree) = sfun.uniform_hyperrectangle(data,
            q_ref, bin_ratio=0.15,
            center_pts_per_edge=np.ones((data.shape[1],)))

    num_l_emulate = 1e6
    lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain, num_l_emulate)
    print "Finished emulating lambda samples"

    mdict = dict()
    mdict['rho_D_M'] = rho_D_M
    mdict['d_distr_samples'] = d_distr_samples 
    mdict['num_l_emulate'] = num_l_emulate
    mdict['lambda_emulate'] = lambda_emulate

    # Calculate P on lambda emulate
    (P0, lem0, io_ptr0, emulate_ptr0) = calcP.prob_emulated(samples, data,
            rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
    print "Calculating prob_emulated"
    mdict['P0'] = P0
    mdict['lem0'] = lem0
    mdict['io_ptr0'] = io_ptr0
    mdict['emulate_ptr0'] = emulate_ptr0

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data,
            rho_D_M, d_distr_samples, d_Tree)
    print("Calculating prob")
    mdict['P1'] = P1
    mdict['lam_vol1'] = lam_vol1
    mdict['lem1'] = samples
    mdict['io_ptr1'] = io_ptr1

    # Calculate P on the actual samples estimating voronoi cell volume with MC
    # integration
    (P3, lam_vol3, lambda_emulate3, io_ptr3, emulate_ptr3) = calcP.prob_mc(samples,
            data, rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
    print "Calculating prob_mc"
    mdict['P3'] = P3
    mdict['lam_vol3'] = lam_vol3
    mdict['io_ptr3'] = io_ptr3
    mdict['emulate_ptr3'] = emulate_ptr3
    # Export P
    sio.savemat(filename, mdict, do_compression=True)
Example #13
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_1to1, self).setUp()
     (self.P, self.lam_vol,
      _) = calcP.prob(samples=self.samples,
                      data=self.data,
                      rho_D_M=self.d_distr_prob,
                      d_distr_samples=self.d_distr_samples,
                      d_Tree=self.d_Tree)
Example #14
 def setUp(self):
     """
     Set up problem.
     """
     super(Test_prob_3to1, self).setUp()
     (self.P, self.lam_vol,
      _) = calcP.prob(samples=self.samples,
                      data=self.data,
                      rho_D_M=self.d_distr_prob,
                      d_distr_samples=self.d_distr_samples,
                      d_Tree=self.d_Tree)
     self.P_ref = np.loadtxt(data_path + "/3to1_prob.txt.gz")
Example #15
def postprocess(station_nums, ref_num):

    filename = 'P_q' + str(station_nums[0] + 1) + '_q' + str(station_nums[1] +
                                                             1)
    if len(station_nums) == 3:
        filename += '_q' + str(station_nums[2] + 1)
    filename += '_ref_' + str(ref_num + 1)

    data = Q[:, station_nums]
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation
    # Save points used to partition D for simple function approximation and the
    # approximation itself (this can be used to make close comparisons...)
    (rho_D_M, d_distr_samples,
     d_Tree) = sfun.uniform_hyperrectangle(data,
                                           q_ref,
                                           bin_ratio=0.15,
                                           center_pts_per_edge=np.ones(
                                               (data.shape[1], )))
    mdict = dict()
    mdict['rho_D_M'] = rho_D_M
    mdict['d_distr_samples'] = d_distr_samples

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data, rho_D_M,
                                         d_distr_samples, d_Tree)
    print("Calculating prob")
    mdict['P1'] = P1
    mdict['lam_vol1'] = lam_vol1
    mdict['lem1'] = samples
    mdict['io_ptr1'] = io_ptr1

    # Export P and compare to MATLAB solution visually
    sio.savemat(filename, mdict, do_compression=True)
Example #16
Partition_discretization = sampler.create_random_discretization(
    'random', Partition_set, num_samples=num_samples_discretize_D)

Monte_Carlo_discretization = sampler.create_random_discretization(
    'random', Monte_Carlo_set, num_samples=num_iid_samples)

# Compute the simple function approximation to the distribution on the data space
simpleFunP.user_partition_user_distribution(my_discretization,
                                            Partition_discretization,
                                            Monte_Carlo_discretization)

# Calculate probabilities
calculateP.prob(my_discretization)

########################################
# Post-process the results (optional)
########################################
# Show some plots of the different sample sets
plotD.scatter_2D(my_discretization._input_sample_set,
                 filename='Parameter_Samples',
                 file_extension='.eps')
plotD.scatter_2D(my_discretization._output_sample_set,
                 filename='QoI_Samples',
                 file_extension='.eps')
plotD.scatter_2D(my_discretization._output_probability_set,
                 filename='Data_Space_Discretization',
                 file_extension='.eps')
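This fragment presupposes a sampler and two input sample sets. A hedged sketch of what those objects might look like, based on the bet.sample and bet.sampling.basicSampling calls used elsewhere on this page; the domain, dimensions, sample counts, and my_model are illustrative assumptions:

import numpy as np
import bet.sample as sample
import bet.sampling.basicSampling as bsam

lam_domain = np.array([[0.0, 1.0], [0.0, 1.0]])  # assumed 2-D parameter domain
num_samples_discretize_D = 10
num_iid_samples = 10000

Partition_set = sample.sample_set(2)
Partition_set.set_domain(lam_domain)
Monte_Carlo_set = sample.sample_set(2)
Monte_Carlo_set.set_domain(lam_domain)

sampler = bsam.sampler(my_model)  # my_model: user-supplied QoI map (assumed)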
Example #17
    def calculate_prob_for_sample_set_region(self,
                                             s_set,
                                             regions,
                                             update_input=True):
        """
        Solves stochastic inverse problem based on surrogate points and the
        MC assumption. Calculates the probability of a regions of input space
        and error estimates for those probabilities.

        :param: s_set: sample set for which to calculate error
        :type s_set: :class:`bet.sample.sample_set_base`
        :param region: list of regions of s_set for which to calculate error
        :type region: list
        :param update_input: whether or not to update probabilities and
            errror identifiers for input discretization
        :type update_input: bool

        :rtype: tuple
        :returns: (probabilities, ``error_estimates``), the probability and
            error estimates for the region
        
        """
        if not hasattr(self, 'surrogate_discretization'):
            msg = "surrogate discretization has not been created"
            raise calculateError.wrong_argument_type(msg)
        if not isinstance(s_set, sample.sample_set_base):
            msg = "s_set must be of type bet.sample.sample_set_base"
            raise calculateError.wrong_argument_type(msg)

        # Calculate probability of region
        if self.surrogate_discretization._input_sample_set._volumes_local\
                is None:
            self.surrogate_discretization._input_sample_set.\
                    estimate_volume_mc(globalize=False)
        calculateP.prob(self.surrogate_discretization, globalize=False)
        prob_new_values = calculateP.prob_from_sample_set(\
                self.surrogate_discretization._input_sample_set, s_set)

        # Calculate for each region
        probabilities = []
        error_estimates = []
        for region in regions:
            marker = np.equal(s_set._region, region)
            probability = np.sum(prob_new_values[marker])

            # Calculate error estimate for region
            model_error = calculateError.model_error(\
                    self.surrogate_discretization)
            error_estimate = model_error.calculate_for_sample_set_region_mc(\
                    s_set, region)
            probabilities.append(probability)
            error_estimates.append(error_estimate)
        # Update the input discretization if requested
        if update_input:
            num = self.input_disc._input_sample_set.check_num()
            prob = np.zeros((num, ))
            error_id = np.zeros((num, ))
            for i in range(num):
                Itemp = np.equal(self.dummy_disc._emulated_ii_ptr_local, i)
                prob_sum = np.sum(self.surrogate_discretization.\
                        _input_sample_set._probabilities_local[Itemp])
                prob[i] = comm.allreduce(prob_sum, op=MPI.SUM)
                error_id_sum = np.sum(self.surrogate_discretization.\
                        _input_sample_set._error_id_local[Itemp])
                error_id[i] = comm.allreduce(error_id_sum, op=MPI.SUM)
            self.input_disc._input_sample_set.set_probabilities(prob)
            self.input_disc._input_sample_set.set_error_id(error_id)

        return (probabilities, error_estimates)
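A hedged usage sketch for the method above; estimator stands in for the enclosing object, and the integer region labels stored in s_set._region are illustrative assumptions:

regions = [0, 1]
probabilities, error_estimates = \
    estimator.calculate_prob_for_sample_set_region(s_set, regions)

# parallel lists, one entry per requested region id
for region, p, e in zip(regions, probabilities, error_estimates):
    print("region %d: P = %g, error estimate = %g" % (region, p, e))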
Example #18
        # RESULTS WHERE SAMPLES = LAMBDA_EMULATE
        # samples are on a regular grid
        # result_wtree, result_wsamples, result_emulated_rg
        self.result_emulated_rg = calcP.prob_emulated(self.r_samples, self.r_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.r_samples, self.d_Tree)
        self.result_wtree = self.result_emulated_rg
        self.result_wsamples = self.result_emulated_rg
        self.result_wotree = calcP.prob_emulated(self.r_samples, self.r_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.r_samples)
        self.result_wosamples = calcP.prob_emulated(self.r_samples, self.r_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain)

        self.result_prob_rg = calcP.prob(self.r_samples, self.r_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.d_Tree)
        self.result_mc_rg = calcP.prob_mc(self.r_samples, self.r_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.r_samples, self.d_Tree)

        # samples are iid
        self.result_emulated_iid = calcP.prob_emulated(self.u_samples, self.u_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.u_samples, self.d_Tree)
        self.result_prob_iid = calcP.prob(self.u_samples, self.u_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.d_Tree)
        self.result_mc_iid = calcP.prob_mc(self.u_samples, self.u_data,
                self.rho_D_M, self.d_distr_samples, self.lam_domain,
                self.u_samples, self.d_Tree)
Example #19
# QoI_indices is assumed to be set earlier in the script; other choices tried:
#QoI_indices = [0, 3, 5, 8, 9]
#QoI_indices = [3, 4, 5, 8, 9]
#QoI_indices = [2, 3, 5, 6, 9]

# Restrict the data to have just QoI_indices
data = data[:, QoI_indices]
Q_ref = Q[QoI_indices, :].dot(0.5 * np.ones(Lambda_dim))
# bin_ratio defines the uncertainty in our data
bin_ratio = 0.25

# Find the simple function approximation
(d_distr_prob, d_distr_samples, d_Tree) = simpleFunP.uniform_hyperrectangle(\
    data=data, Q_ref=Q_ref, bin_ratio=bin_ratio, center_pts_per_edge=1)

# Calculate probabilities making the Monte Carlo assumption
(P, lam_vol, io_ptr) = calculateP.prob(samples=samples, data=data,
    rho_D_M=d_distr_prob, d_distr_samples=d_distr_samples)

percentile = 1.0
# Sort samples by highest probability density and find how many samples lie in
# the support of the inverse solution.  With the Monte Carlo assumption, this
# also tells us the approximate volume of this support.
(num_samples, P_high, samples_high, lam_vol_high, data_high) =\
    postTools.sample_highest_prob(top_percentile=percentile, P_samples=P,
    samples=samples, lam_vol=lam_vol, data=data, sort=True)

# Print the number of samples that make up the highest-probability set and
# the fraction of the parameter domain volume they occupy
if comm.rank == 0:
    print(num_samples, np.sum(lam_vol_high))
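With top_percentile=1.0 the call above keeps every sample carrying probability; a smaller value isolates the highest-density part of the support. The same call with a tighter (illustrative) percentile:

(num_samples_95, P_95, samples_95, lam_vol_95, data_95) = \
    postTools.sample_highest_prob(top_percentile=0.95, P_samples=P,
                                  samples=samples, lam_vol=lam_vol,
                                  data=data, sort=True)
if comm.rank == 0:
    print(num_samples_95, np.sum(lam_vol_95))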
Example #20
    def calculate_prob_for_sample_set_region(self, s_set, 
                                             regions, update_input=True):
        """
        Solves stochastic inverse problem based on surrogate points and the
        MC assumption. Calculates the probability of a regions of input space
        and error estimates for those probabilities.

        :param: s_set: sample set for which to calculate error
        :type s_set: :class:`bet.sample.sample_set_base`
        :param region: list of regions of s_set for which to calculate error
        :type region: list
        :param update_input: whether or not to update probabilities and
            errror identifiers for input discretization
        :type update_input: bool

        :rtype: tuple
        :returns: (probabilities, ``error_estimates``), the probability and
            error estimates for the region
        
        """
        if not hasattr(self, 'surrogate_discretization'):
            msg = "surrogate discretization has not been created"
            raise calculateError.wrong_argument_type(msg)
        if not isinstance(s_set, sample.sample_set_base):
            msg = "s_set must be of type bet.sample.sample_set_base"
            raise calculateError.wrong_argument_type(msg)
            
        # Calculate probability of region 
        if self.surrogate_discretization._input_sample_set._volumes_local\
                is None:
            self.surrogate_discretization._input_sample_set.\
                    estimate_volume_mc(globalize=False)
        calculateP.prob(self.surrogate_discretization, globalize=False)
        prob_new_values = calculateP.prob_from_sample_set(\
                self.surrogate_discretization._input_sample_set, s_set)
        
        # Calculate for each region
        probabilities = []
        error_estimates = []
        for region in regions:
            marker = np.equal(s_set._region, region)
            probability = np.sum(prob_new_values[marker])

            # Calculate error estimate for region
            model_error = calculateError.model_error(\
                    self.surrogate_discretization)
            error_estimate = model_error.calculate_for_sample_set_region_mc(\
                    s_set, region)
            probabilities.append(probability)
            error_estimates.append(error_estimate)
        # Update the input discretization if requested
        if update_input:
            num = self.input_disc._input_sample_set.check_num()
            prob = np.zeros((num,))
            error_id = np.zeros((num,))
            for i in range(num):
                Itemp = np.equal(self.dummy_disc._emulated_ii_ptr_local, i)
                prob_sum = np.sum(self.surrogate_discretization.\
                        _input_sample_set._probabilities_local[Itemp])
                prob[i] = comm.allreduce(prob_sum, op=MPI.SUM)
                error_id_sum = np.sum(self.surrogate_discretization.\
                        _input_sample_set._error_id_local[Itemp])
                error_id[i] = comm.allreduce(error_id_sum, op=MPI.SUM)
            self.input_disc._input_sample_set.set_probabilities(prob)
            self.input_disc._input_sample_set.set_error_id(error_id)
                    
        return (probabilities, error_estimates)
Example #21

# Restrict the data to have just QoI_indices
data = data[:, QoI_indices]
Q_ref = Q[QoI_indices, :].dot(0.5 * np.ones(Lambda_dim))

# bin_ratio defines the uncertainty in our data
bin_ratio = 0.25

# Find the simple function approximation
(d_distr_prob, d_distr_samples, d_Tree) = simpleFunP.uniform_hyperrectangle(\
    data=data, Q_ref=Q_ref, bin_ratio=bin_ratio, center_pts_per_edge=1)

# Calculate probabilities making the Monte Carlo assumption
(P, lam_vol, io_ptr) = calculateP.prob(samples=samples,
                                       data=data,
                                       rho_D_M=d_distr_prob,
                                       d_distr_samples=d_distr_samples)

percentile = 1.0
# Sort samples by highest probability density and find how many samples lie in
# the support of the inverse solution.  With the Monte Carlo assumption, this
# also tells us the approximate volume of this support.
(num_samples, P_high, samples_high, lam_vol_high, data_high) =\
    postTools.sample_highest_prob(top_percentile=percentile, P_samples=P,
    samples=samples, lam_vol=lam_vol, data=data, sort=True)

# Print the number of samples that make up the highest-probability set and
# the fraction of the parameter domain volume they occupy
if comm.rank == 0:
    print(num_samples, np.sum(lam_vol_high))