Esempio n. 1
0
    def test_chooseOptQoIs_large(self):
        """
        Test :meth:`bet.sensitivity.chooseQoIs.chooseOptQoIs_large`.
        """
        self.qoiIndices = range(0, self.num_qois)
        best_sets = cQoIs.chooseOptQoIs_large(
            self.G,
            qoiIndices=self.qoiIndices,
            inner_prod_tol=self.inner_prod_tol,
            cond_tol=self.cond_tol)

        # The first column of each returned matrix holds the condition
        # number of that QoI set; every entry must stay below the tolerance.
        for set_index in range(self.Lambda_dim - 1):
            nptest.assert_array_less(
                best_sets[set_index][:, 0], self.cond_tol)
Esempio n. 2
0
    def test_chooseOptQoIs_large(self):
        """
        Test :meth:`bet.sensitivity.chooseQoIs.chooseOptQoIs_large`.
        """
        self.qoiIndices = range(0, self.output_dim)
        best_sets = cQoIs.chooseOptQoIs_large(
            self.input_set_centers,
            qoiIndices=self.qoiIndices,
            inner_prod_tol=self.inner_prod_tol,
            measskew_tol=self.measskew_tol)

        # An infinite skewness tolerance would make the comparison below
        # vacuous, so replace it with the largest representable float.
        if self.measskew_tol == np.inf:
            self.measskew_tol = sys.float_info.max
        # Skip entries whose skewness is infinite and check that every
        # remaining best set stays below the tolerance.
        for set_index in range(self.input_dim - 1):
            finite = best_sets[set_index][:, 0] != np.inf
            nptest.assert_array_less(
                best_sets[set_index][finite, 0], self.measskew_tol)
Esempio n. 3
0
    def test_chooseOptQoIs_large(self):
        """
        Test :meth:`bet.sensitivity.chooseQoIs.chooseOptQoIs_large`.
        """
        self.qoiIndices = range(0, self.num_qois)
        best_sets = cQoIs.chooseOptQoIs_large(
            self.G, qoiIndices=self.qoiIndices,
            inner_prod_tol=self.inner_prod_tol, cond_tol=self.cond_tol)

        # An infinite condition-number tolerance would make the comparison
        # below vacuous, so replace it with the largest representable float.
        if self.cond_tol == np.inf:
            self.cond_tol = sys.float_info.max
        # Skip entries whose condition number is infinite and check that
        # every remaining best set stays below the tolerance.
        for set_index in range(self.Lambda_dim - 1):
            finite = best_sets[set_index][:, 0] != np.inf
            nptest.assert_array_less(
                best_sets[set_index][finite, 0], self.cond_tol)
Esempio n. 4
0
# Bundle the sampled inputs and model outputs into one discretization object
# (``sample``, ``grad`` and ``cqoi`` are BET modules imported elsewhere in
# this file — not shown here).
cluster_discretization = sample.discretization(input_samples, output_samples)
# We will approximate the jacobian at each of the centers
center_discretization = grad.calculate_gradients_rbf(cluster_discretization,
                                                     num_centers,
                                                     normalize=True)

# With these gradient vectors, we are now ready to choose an optimal set of
# QoIs to use in the inverse problem, based on optimal skewness properties of
# QoI vectors.  The most robust method for this is
# :meth:~bet.sensitivity.chooseQoIs.chooseOptQoIs_large which returns the
# best set of 2, 3, 4 ... until input_dim.  This method returns a list of
# matrices.  Each matrix has 10 rows, the first column representing the
# average skewness of the Jacobian of Q, and the rest of the columns
# the corresponding QoI indices.
input_samples_center = center_discretization.get_input_sample_set()
# measure=False selects the skewness-based ranking described above rather
# than a measure/volume-based one.
best_sets = cqoi.chooseOptQoIs_large(input_samples_center, measure=False)

###############################################################################

# At this point we have determined the optimal set of QoIs to use in the inverse
# problem.  Now we compare the support of the inverse solution using
# different sets of these QoIs.  We set Q_ref to correspond to the center of
# the parameter space.  We choose the set of QoIs to consider.

QoI_indices = [3, 4]  # choose up to input_dim
# Alternative QoI choices to experiment with (uncomment one to use it):
#QoI_indices = [3, 6]
#QoI_indices = [0, 3]
#QoI_indices = [3, 5, 6, 8, 9]
#QoI_indices = [0, 3, 5, 8, 9]
#QoI_indices = [3, 4, 5, 8, 9]
#QoI_indices = [2, 3, 5, 6, 9]
# Recompute the gradients without normalization; elsewhere in this file
# normalize=False is paired with a bin_size description of the uncertainty
# in the data.
cluster_discretization = sample.discretization(input_samples, output_samples)
# We will approximate the jacobian at each of the centers
center_discretization = grad.calculate_gradients_rbf(cluster_discretization,
        num_centers, normalize=False)

# With these gradient vectors, we are now ready to choose an optimal set of
# QoIs to use in the inverse problem, based on minimizing the support of the
# inverse solution (volume).  The most robust method for this is
# :meth:~bet.sensitivity.chooseQoIs.chooseOptQoIs_large which returns the
# best set of 2, 3, 4 ... until input_dim.  This method returns a list of
# matrices.  Each matrix has 10 rows, the first column representing the
# expected inverse volume ratio, and the rest of the columns the corresponding
# QoI indices.
input_samples_center = center_discretization.get_input_sample_set()
# max_qois_return/num_optsets_return bound the search; inner_prod_tol and
# measskew_tol are pruning tolerances passed through to the search — see the
# BET chooseQoIs documentation for their exact meaning.
best_sets = cqoi.chooseOptQoIs_large(input_samples_center, max_qois_return=5,
    num_optsets_return=2, inner_prod_tol=0.9, measskew_tol=1E2, measure=True)

'''
We see here the expected volume ratios are small.  This number represents the
expected volume of the inverse image of a unit hypercube in the data space.
With the bin_size definition of the uncertainty in the data, here we expect to
see inverse solutions that have a smaller support (expected volume ratio < 1)
than the original volume of the hypercube in the data space.

This interpretation of the expected volume ratios is only valid for inverting
from a data space that has the same dimensions as the parameter space.  When
inverting into a higher dimensional space, this expected volume ratio is the
expected volume of the cross section of the inverse solution.
'''
###############################################################################
# Calculate the gradient vectors at some subset of the samples.  Here the
# *normalize* argument is set to *True* because we are using *bin_ratio* to
# determine the uncertainty in our data.
G = grad.calculate_gradients_rbf(samples,
                                 data,
                                 centers=samples[:num_centers, :],
                                 normalize=True)

# With these gradient vectors, we are now ready to choose an optimal set of
# QoIs to use in the inverse problem, based on minimizing the support of the
# inverse solution (volume).  The most robust method for this is
# :meth:~bet.sensitivity.chooseQoIs.chooseOptQoIs_large which returns the
# best set of 2, 3, 4 ... until Lambda_dim.  This method returns a list of
# matrices.  Each matrix has 10 rows, the first column representing the
# expected inverse volume ratio, and the rest of the columns the corresponding
# QoI indices.
# volume=True ranks QoI sets by the expected inverse volume ratio described
# above.
best_sets = cQoI.chooseOptQoIs_large(G, volume=True)

###############################################################################

# At this point we have determined the optimal set of QoIs to use in the inverse
# problem.  Now we compare the support of the inverse solution using
# different sets of these QoIs.  We set Q_ref to correspond to the center of
# the parameter space.  We choose the set of QoIs to consider.

QoI_indices = [3, 6]  # choose up to Lambda_dim
# Alternative QoI choices to experiment with (uncomment one to use it):
#QoI_indices = [3, 4]
#QoI_indices = [8, 9]
#QoI_indices = [3, 5, 6, 8, 9]
#QoI_indices = [3, 4, 5, 8, 9]
#QoI_indices = [2, 3, 6, 8, 9]
#QoI_indices = [3, 5, 6, 7, 8]
Esempio n. 7
0
# Approximate the gradients at the cluster centers; normalize=True is paired
# elsewhere in this file with a bin_ratio description of the uncertainty in
# the data.
cluster_discretization = sample.discretization(input_samples, output_samples)
# We will approximate the jacobian at each of the centers
center_discretization = grad.calculate_gradients_rbf(cluster_discretization,
    num_centers, normalize=True)

# With these gradient vectors, we are now ready to choose an optimal set of
# QoIs to use in the inverse problem, based on optimal skewness properties of
# QoI vectors.  The most robust method for this is
# :meth:~bet.sensitivity.chooseQoIs.chooseOptQoIs_large which returns the
# best set of 2, 3, 4 ... until input_dim.  This method returns a list of
# matrices.  Each matrix has 10 rows, the first column representing the
# average skewness of the Jacobian of Q, and the rest of the columns
# the corresponding QoI indices.
input_samples_center = center_discretization.get_input_sample_set()
# measure=False selects the skewness-based ranking described above.
best_sets = cqoi.chooseOptQoIs_large(input_samples_center, measure=False)

###############################################################################

# At this point we have determined the optimal set of QoIs to use in the inverse
# problem.  Now we compare the support of the inverse solution using
# different sets of these QoIs.  We set Q_ref to correspond to the center of
# the parameter space.  We choose the set of QoIs to consider.

QoI_indices = [3, 4] # choose up to input_dim
# Alternative QoI choices to experiment with (uncomment one to use it):
#QoI_indices = [3, 6]
#QoI_indices = [0, 3]
#QoI_indices = [3, 5, 6, 8, 9]
#QoI_indices = [0, 3, 5, 8, 9]
#QoI_indices = [3, 4, 5, 8, 9]
#QoI_indices = [2, 3, 5, 6, 9]
# Recompute the gradients without normalization; elsewhere in this file
# normalize=False is paired with a bin_size description of the uncertainty
# in the data.
G = grad.calculate_gradients_rbf(samples,
                                 data,
                                 centers=samples[:num_centers, :],
                                 normalize=False)

# With these gradient vectors, we are now ready to choose an optimal set of
# QoIs to use in the inverse problem, based on minimizing the support of the
# inverse solution (volume).  The most robust method for this is
# :meth:~bet.sensitivity.chooseQoIs.chooseOptQoIs_large which returns the
# best set of 2, 3, 4 ... until Lambda_dim.  This method returns a list of
# matrices.  Each matrix has 10 rows, the first column representing the
# expected inverse volume ratio, and the rest of the columns the corresponding
# QoI indices.
# max_qois_return/num_optsets_return bound the search; inner_prod_tol and
# cond_tol are pruning tolerances passed through to the search — see the BET
# chooseQoIs documentation for their exact meaning.
best_sets = cQoI.chooseOptQoIs_large(G,
                                     max_qois_return=5,
                                     num_optsets_return=2,
                                     inner_prod_tol=0.9,
                                     cond_tol=1E2,
                                     volume=True)
'''
We see here the expected volume ratios are small.  This number represents the
expected volume of the inverse image of a unit hypercube in the data space.
With the bin_size definition of the uncertainty in the data, here we expect to
see inverse solutions that have a smaller support (expected volume ratio < 1)
than the original volume of the hypercube in the data space.

This interpretation of the expected volume ratios is only valid for inverting
from a data space that has the same dimensions as the parameter space.  When
inverting into a higher dimensional space, this expected volume ratio is the
expected volume of the cross section of the inverse solution.
'''
###############################################################################
Esempio n. 9
0
# Calculate the gradient vectors at some subset of the samples.  Here the
# *normalize* argument is set to *True* because we are using bin_ratio to
# determine the uncertainty in our data.
G = grad.calculate_gradients_rbf(samples, data, centers=samples[:num_centers, :],
    normalize=True)

# With these gradient vectors, we are now ready to choose an optimal set of
# QoIs to use in the inverse problem, based on optimal skewness properties of
# QoI vectors.  The most robust method for this is
# :meth:~bet.sensitivity.chooseQoIs.chooseOptQoIs_large which returns the
# best set of 2, 3, 4 ... until Lambda_dim.  This method returns a list of
# matrices.  Each matrix has 10 rows, the first column representing the
# average condition number of the Jacobian of Q, and the rest of the columns
# the corresponding QoI indices.
# volume=False selects the condition-number-based ranking described above
# rather than the volume-based one.
best_sets = cQoI.chooseOptQoIs_large(G, volume=False)

###############################################################################

# At this point we have determined the optimal set of QoIs to use in the inverse
# problem.  Now we compare the support of the inverse solution using
# different sets of these QoIs.  We set Q_ref to correspond to the center of
# the parameter space.  We choose the set of QoIs to consider.

QoI_indices = [3, 4] # choose up to Lambda_dim
# Alternative QoI choices to experiment with (uncomment one to use it):
#QoI_indices = [3, 6]
#QoI_indices = [0, 3]
#QoI_indices = [3, 5, 6, 8, 9]
#QoI_indices = [0, 3, 5, 8, 9]
#QoI_indices = [3, 4, 5, 8, 9]
#QoI_indices = [2, 3, 5, 6, 9]
# Recompute the gradients without normalization; elsewhere in this file
# normalize=False is paired with a bin_size description of the uncertainty
# in the data.
cluster_discretization = sample.discretization(input_samples, output_samples)
# We will approximate the jacobian at each of the centers
center_discretization = grad.calculate_gradients_rbf(cluster_discretization,
                                                     num_centers, normalize=False)

# With these gradient vectors, we are now ready to choose an optimal set of
# QoIs to use in the inverse problem, based on minimizing the support of the
# inverse solution (volume).  The most robust method for this is
# :meth:~bet.sensitivity.chooseQoIs.chooseOptQoIs_large which returns the
# best set of 2, 3, 4 ... until input_dim.  This method returns a list of
# matrices.  Each matrix has 10 rows, the first column representing the
# expected inverse volume ratio, and the rest of the columns the corresponding
# QoI indices.
input_samples_center = center_discretization.get_input_sample_set()
# max_qois_return/num_optsets_return bound the search; inner_prod_tol and
# measskew_tol are pruning tolerances passed through to the search — see the
# BET chooseQoIs documentation for their exact meaning.
best_sets = cqoi.chooseOptQoIs_large(input_samples_center, max_qois_return=5,
                                     num_optsets_return=2, inner_prod_tol=0.9, measskew_tol=1E2, measure=True)

'''
We see here the expected volume ratios are small.  This number represents the
expected volume of the inverse image of a unit hypercube in the data space.
With the bin_size definition of the uncertainty in the data, here we expect to
see inverse solutions that have a smaller support (expected volume ratio < 1)
than the original volume of the hypercube in the data space.

This interpretation of the expected volume ratios is only valid for inverting
from a data space that has the same dimensions as the parameter space.  When
inverting into a higher dimensional space, this expected volume ratio is the
expected volume of the cross section of the inverse solution.
'''
###############################################################################
Esempio n. 11
0
# Calculate the gradient vectors at some subset of the samples.  Here the
# *normalize* argument is set to *True* because we are using *bin_ratio* to
# determine the uncertainty in our data.
G = grad.calculate_gradients_rbf(samples, data, centers=samples[:num_centers, :],
    normalize=True)

# With these gradient vectors, we are now ready to choose an optimal set of
# QoIs to use in the inverse problem, based on minimizing the support of the
# inverse solution (volume).  The most robust method for this is
# :meth:~bet.sensitivity.chooseQoIs.chooseOptQoIs_large which returns the
# best set of 2, 3, 4 ... until Lambda_dim.  This method returns a list of
# matrices.  Each matrix has 10 rows, the first column representing the
# expected inverse volume ratio, and the rest of the columns the corresponding
# QoI indices.
# volume=True ranks QoI sets by the expected inverse volume ratio described
# above.
best_sets = cQoI.chooseOptQoIs_large(G, volume=True)

###############################################################################

# At this point we have determined the optimal set of QoIs to use in the inverse
# problem.  Now we compare the support of the inverse solution using
# different sets of these QoIs.  We set Q_ref to correspond to the center of
# the parameter space.  We choose the set of QoIs to consider.

QoI_indices = [3, 6] # choose up to Lambda_dim
# Alternative QoI choices to experiment with (uncomment one to use it):
#QoI_indices = [3, 4]
#QoI_indices = [8, 9]
#QoI_indices = [3, 5, 6, 8, 9]
#QoI_indices = [3, 4, 5, 8, 9]
#QoI_indices = [2, 3, 6, 8, 9]
#QoI_indices = [3, 5, 6, 7, 8]
# Calculate the gradient vectors at some subset of the samples.  Here the
# *normalize* argument is set to *False* because we are using bin_size to
# determine the uncertainty in our data.
G = grad.calculate_gradients_rbf(samples, data, centers=samples[:num_centers, :],
    normalize=False)

# With these gradient vectors, we are now ready to choose an optimal set of
# QoIs to use in the inverse problem, based on minimizing the support of the
# inverse solution (volume).  The most robust method for this is
# :meth:~bet.sensitivity.chooseQoIs.chooseOptQoIs_large which returns the
# best set of 2, 3, 4 ... until Lambda_dim.  This method returns a list of
# matrices.  Each matrix has 2 rows (num_optsets_return below), the first
# column representing the expected inverse volume ratio, and the rest of the
# columns the corresponding QoI indices.
# inner_prod_tol and cond_tol are pruning tolerances passed through to the
# search — see the BET chooseQoIs documentation for their exact meaning.
best_sets = cQoI.chooseOptQoIs_large(G, max_qois_return=5,
    num_optsets_return=2, inner_prod_tol=0.9, cond_tol=1E2, volume=True)

'''
We see here the expected volume ratios are small.  This number represents the
expected volume of the inverse image of a unit hypercube in the data space.
With the bin_size definition of the uncertainty in the data, here we expect to
see inverse solutions that have a smaller support (expected volume ratio < 1)
than the original volume of the hypercube in the data space.

This interpretation of the expected volume ratios is only valid for inverting
from a data space that has the same dimensions as the parameter space.  When
inverting into a higher dimensional space, this expected volume ratio is the
expected volume of the cross section of the inverse solution.
'''
###############################################################################