Example #1
    def setUp(self):
        """
        Set up problem.
        """
        super(uniform_hyperrectangle_size_list, self).setUp()
        if not isinstance(self.Q_ref, np.ndarray):
            Q_ref = np.array([self.Q_ref])
        else:
            Q_ref = self.Q_ref
        if len(self.data_domain.shape) == 1:
            data_domain = np.expand_dims(self.data_domain, axis=0)
        else:
            data_domain = self.data_domain

        self.rect_domain = np.zeros((data_domain.shape[0], 2))
        binsize = 1.0*np.ones((data_domain.shape[0],))
        r_width = binsize

        self.rect_domain[:, 0] = Q_ref - .5*r_width
        self.rect_domain[:, 1] = Q_ref + .5*r_width

        self.rho_D_M, self.d_distr_samples, self.d_Tree = \
            sFun.uniform_hyperrectangle_binsize(self.data, self.Q_ref,
                                                binsize, self.center_pts_per_edge)
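
This snippet is a test-class fragment; a minimal sketch of the module-level imports it presumably relies on (inferred from the aliases used; the exact module paths may differ by BET version):

import numpy as np
import bet.calculateP.simpleFunP as sFun
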
Example #2

# Choose the set of QoIs to consider.

QoI_indices = [0, 7]  # choose up to Lambda_dim
#QoI_indices = [0, 1]
#QoI_indices = [0, 7, 34, 39, 90]
#QoI_indices = [0, 1, 2, 3, 4]

# Restrict the data to have just QoI_indices
data = data[:, QoI_indices]
Q_ref = Q[QoI_indices, :].dot(0.5 * np.ones(Lambda_dim))
# bin_size defines the uncertainty in our data
bin_size = 0.25

# Find the simple function approximation
(d_distr_prob, d_distr_samples, d_Tree) = \
    simpleFunP.uniform_hyperrectangle_binsize(data=data, Q_ref=Q_ref,
                                              bin_size=bin_size,
                                              center_pts_per_edge=1)
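
For intuition, a minimal numpy sketch (an illustration under stated assumptions, not the library implementation) of the density this simple-function approximation encodes: uniform mass on the hyperrectangle Q_ref ± bin_size/2 in data space and zero outside it. The helper name is hypothetical.

import numpy as np

def uniform_rect_indicator(q, Q_ref, bin_size):
    # Hypothetical helper: unnormalized indicator of the box centered at Q_ref
    # with side length bin_size in every data dimension; the library instead
    # discretizes this density onto d_distr_samples with weights d_distr_prob.
    q = np.atleast_2d(q)
    lo = np.asarray(Q_ref) - 0.5 * bin_size
    hi = np.asarray(Q_ref) + 0.5 * bin_size
    return np.all((q >= lo) & (q <= hi), axis=1).astype(float)
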

# Calculate probabilities under the Monte Carlo assumption
(P, lam_vol, io_ptr) = calculateP.prob(samples=samples,
                                       data=data,
                                       rho_D_M=d_distr_prob,
                                       d_distr_samples=d_distr_samples)
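
A minimal sketch (an assumption about what this step does, not the library code) of the Monte Carlo apportionment: every sample is assigned an equal Voronoi-cell volume 1/N, and each data cell's probability is split among the samples whose data are nearest to that cell's d_distr_sample.

import numpy as np
from scipy.spatial import cKDTree

def mc_prob_sketch(samples, data, rho_D_M, d_distr_samples):
    # Hypothetical helper, not the BET API.
    N = samples.shape[0]
    lam_vol = np.ones(N) / N                          # equal-volume MC assumption
    io_ptr = cKDTree(d_distr_samples).query(data)[1]  # nearest data cell per sample
    P = np.zeros(N)
    for j, p_j in enumerate(rho_D_M):
        in_cell = (io_ptr == j)
        vol_j = lam_vol[in_cell].sum()
        if vol_j > 0:
            # split cell j's probability among the samples that land in it
            P[in_cell] = p_j * lam_vol[in_cell] / vol_j
    return P, lam_vol, io_ptr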

percentile = 1.0
# Sort samples by highest probability density and find how many samples lie in
# the support of the inverse solution.  With the Monte Carlo assumption, this
# also tells us the approximate volume of this support.
(num_samples, P_high, samples_high, lam_vol_high, data_high) = \
    postTools.sample_highest_prob(top_percentile=percentile, P_samples=P,
                                  samples=samples, lam_vol=lam_vol,
                                  data=data, sort=True)
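
A rough sketch (not the library code) of the selection that sample_highest_prob performs here: order samples by descending probability (with equal MC cell volumes this matches ordering by density) and keep the smallest set whose probabilities reach top_percentile of the total. The helper name is hypothetical.

import numpy as np

def highest_prob_subset(P, samples, lam_vol, data, top_percentile=1.0):
    order = np.argsort(P)[::-1]                       # descending probability
    cumulative = np.cumsum(P[order])
    num = min(int(np.searchsorted(cumulative, top_percentile * P.sum())) + 1,
              P.shape[0])
    keep = order[:num]
    return num, P[keep], samples[keep], lam_vol[keep], data[keep]
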
Example #3
# Choose the set of QoIs to consider.

QoI_indices = [0, 7] # choose up to Lambda_dim
#QoI_indices = [0, 1]
#QoI_indices = [0, 7, 34, 39, 90]
#QoI_indices = [0, 1, 2, 3, 4]

# Restrict the data to have just QoI_indices
data = data[:, QoI_indices]
Q_ref = Q[QoI_indices, :].dot(0.5 * np.ones(Lambda_dim))
# bin_size defines the uncertainty in our data
bin_size = 0.25

# Find the simple function approximation
(d_distr_prob, d_distr_samples, d_Tree) = \
    simpleFunP.uniform_hyperrectangle_binsize(data=data, Q_ref=Q_ref,
                                              bin_size=bin_size,
                                              center_pts_per_edge=1)

# Calculate probabilities under the Monte Carlo assumption
(P, lam_vol, io_ptr) = calculateP.prob(samples=samples, data=data,
                                       rho_D_M=d_distr_prob,
                                       d_distr_samples=d_distr_samples)

percentile = 1.0
# Sort samples by highest probability density and find how many samples lie in
# the support of the inverse solution.  With the Monte Carlo assumption, this
# also tells us the approximate volume of this support.
(num_samples, P_high, samples_high, lam_vol_high, data_high, _) = \
    postTools.sample_highest_prob(top_percentile=percentile, P_samples=P,
                                  samples=samples, lam_vol=lam_vol,
                                  data=data, sort=True)

# Print the number of samples that make up the top `percentile` fraction of
# the probability, and the fraction of the parameter-domain volume they occupy
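
The example is cut off here; as an illustration only (an assumption, not the original continuation), the quantities described in the comment above could be reported as follows, since under the MC assumption the cell volumes sum to one:

# Fraction of the parameter-domain volume covered by the retained samples.
print(num_samples, np.sum(lam_vol_high) / np.sum(lam_vol))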