Example #1
    def test_calculate_gradients_ffd(self):
        """
        Test :meth:`bet.sensitivity.gradients.calculate_gradients_ffd`.
        """
        self.output_set = sample.sample_set(self.output_dim)
        self.cluster_set = grad.pick_ffd_points(self.input_set_centers,
            self.rvec)
        self.num_centers = self.input_set_centers.check_num()
        self.output_set.set_values(self.cluster_set._values.dot(self.coeffs))
        self.cluster_disc = sample.discretization(self.cluster_set,
                self.output_set)

        self.center_disc = grad.calculate_gradients_ffd(self.cluster_disc)
        self.jacobians = self.center_disc._input_sample_set._jacobians

        # Test the method returns the correct size tensor
        self.assertEqual(self.jacobians.shape, 
                (self.num_centers, self.output_dim, self.input_dim))

        # Test that each vector is normalized or a zero vector
        normG = np.linalg.norm(self.jacobians, ord=1, axis=2)

        # If a gradient is the zero vector, replace it with a vector of unit 1-norm
        self.jacobians[normG == 0] = 1.0 / self.input_dim
        nptest.assert_array_almost_equal(np.linalg.norm(self.jacobians, ord=1,
            axis=2), np.ones((self.jacobians.shape[0],
                self.jacobians.shape[1])))
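Note: this test depends on fixtures (self.input_set_centers, self.rvec, self.coeffs, self.num_centers, self.input_dim, self.output_dim) that the test class builds in setUp. A minimal sketch of such a setUp, assuming a random linear QoI map and illustrative sizes (these values are not taken from BET's actual test suite):

import numpy as np
import bet.sample as sample

def setUp(self):
    # Illustrative sizes (assumptions, not BET's shipped test parameters).
    self.input_dim = 2
    self.output_dim = 3
    self.num_centers = 10

    # Centers at which the gradients are approximated.
    self.input_set_centers = sample.sample_set(self.input_dim)
    self.input_set_centers.set_values(
        np.random.uniform(0.0, 1.0, (self.num_centers, self.input_dim)))

    # Forward finite-difference perturbation radii, one per input dimension.
    self.rvec = 0.01 * np.ones(self.input_dim)

    # Coefficients of a linear QoI map: output values are values.dot(coeffs).
    self.coeffs = np.random.random((self.input_dim, self.output_dim))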
Example #2
    def test_calculate_gradients_ffd_accuracy(self):
        """
        Test :meth:`bet.sensitivity.gradients.calculate_gradients_ffd`.
        """
        self.G_nonlin = grad.calculate_gradients_ffd(self.samples_ffd,
            self.data_nonlin_ffd, normalize=False)

        nptest.assert_array_almost_equal(self.G_nonlin - self.G_exact, 0,
                                         decimal=2)
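The accuracy test compares the FFD estimate against an analytically known Jacobian, self.G_exact. A hedged sketch of how those fixtures could be built for a simple nonlinear map, under the older array-based interface this example uses (the quadratic map, sizes, and radii are illustrative assumptions, not the map in BET's test suite):

import numpy as np
import bet.sensitivity.gradients as grad

def setUp(self):
    # Illustrative nonlinear QoI map q(x) = [x0**2 + x1, x0*x1] (assumption).
    Lambda_dim = 2
    num_centers = 10
    centers = np.random.uniform(0.2, 0.8, (num_centers, Lambda_dim))
    rvec = 1e-4 * np.ones(Lambda_dim)

    # Forward finite-difference clusters and the QoI data evaluated on them.
    self.samples_ffd = grad.pick_ffd_points(centers, rvec)
    x0, x1 = self.samples_ffd[:, 0], self.samples_ffd[:, 1]
    self.data_nonlin_ffd = np.column_stack((x0**2 + x1, x0 * x1))

    # Exact Jacobian of q at each center: shape (num_centers, num_qois, Lambda_dim).
    self.G_exact = np.array([[[2.0 * c[0], 1.0],
                              [c[1], c[0]]] for c in centers])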
Example #3
    def test_calculate_gradients_ffd_accuracy(self):
        """
        Test :meth:`bet.sensitivity.gradients.calculate_gradients_ffd`.
        """
        self.center_disc = grad.calculate_gradients_ffd(self.cluster_disc_ffd,
                                                        normalize=False)
        self.jacobians = self.center_disc._input_sample_set._jacobians

        nptest.assert_allclose(self.jacobians - self.G_exact, 0, atol=2)
Example #4
    def test_calculate_gradients_ffd_accuracy(self):
        """
        Test :meth:`bet.sensitivity.gradients.calculate_gradients_ffd`.
        """
        self.center_disc = grad.calculate_gradients_ffd(
            self.cluster_disc_ffd, normalize=False)
        self.jacobians = self.center_disc._input_sample_set._jacobians

        nptest.assert_allclose(self.jacobians - self.G_exact, 0,
                atol=2)
Example #5
    def test_calculate_gradients_ffd_accuracy(self):
        """
        Test :meth:`bet.sensitivity.gradients.calculate_gradients_ffd`.
        """
        self.G_nonlin = grad.calculate_gradients_ffd(self.samples_ffd,
                                                     self.data_nonlin_ffd,
                                                     normalize=False)

        nptest.assert_array_almost_equal(self.G_nonlin - self.G_exact,
                                         0,
                                         decimal=2)
Example #6
    def test_calculate_gradients_ffd(self):
        """
        Test :meth:`bet.sensitivity.gradients.calculate_gradients_ffd`.
        """
        self.samples = grad.pick_ffd_points(self.centers, self.rvec)
        self.data = self.samples.dot(self.coeffs)
        self.G = grad.calculate_gradients_ffd(self.samples, self.data)

        # Test the method returns the correct size tensor
        self.assertEqual(self.G.shape, (self.num_centers, self.num_qois,
            self.Lambda_dim))

        # Test that each vector is normalized
        normG = np.linalg.norm(self.G, axis=2)

        # If a gradient is the zero vector, replace it with a vector of unit 2-norm
        self.G[normG == 0] = 1.0 / np.sqrt(self.Lambda_dim)
        nptest.assert_array_almost_equal(np.linalg.norm(self.G, axis=2),
            np.ones((self.G.shape[0], self.G.shape[1])))
Example #7
    def test_calculate_gradients_ffd(self):
        """
        Test :meth:`bet.sensitivity.gradients.calculate_gradients_ffd`.
        """
        self.samples = grad.pick_ffd_points(self.centers, self.rvec)
        self.data = self.samples.dot(self.coeffs)
        self.G = grad.calculate_gradients_ffd(self.samples, self.data)

        # Test the method returns the correct size tensor
        self.assertEqual(self.G.shape,
                         (self.num_centers, self.num_qois, self.Lambda_dim))

        # Test that each vector is normalized
        normG = np.linalg.norm(self.G, ord=1, axis=2)

        # If a gradient is the zero vector, replace it with a vector of unit 1-norm
        self.G[normG == 0] = 1.0 / self.Lambda_dim
        nptest.assert_array_almost_equal(
            np.linalg.norm(self.G, ord=1, axis=2),
            np.ones((self.G.shape[0], self.G.shape[1])))
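Like Example #6, this test assumes self.centers, self.rvec, self.coeffs, and the size attributes are defined in setUp under the older array-based interface. A rough fixture sketch (all sizes and values are illustrative assumptions):

import numpy as np

def setUp(self):
    # Illustrative sizes (assumptions).
    self.Lambda_dim = 2      # dimension of the input (parameter) space
    self.num_qois = 3        # number of quantities of interest
    self.num_centers = 10    # number of cluster centers

    # Centers at which gradients are approximated, and FFD perturbation radii.
    self.centers = np.random.uniform(0.0, 1.0,
                                     (self.num_centers, self.Lambda_dim))
    self.rvec = 0.01 * np.ones(self.Lambda_dim)

    # Linear QoI map: data = samples.dot(coeffs), so each un-normalized
    # gradient is a column of coeffs.
    self.coeffs = np.random.random((self.Lambda_dim, self.num_qois))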
Example #8
# Set the input sample values from the imported file
input_samples.set_values(matfile['samples'])

# Set the data from the imported file
output_samples.set_values(matfile['data'])

# Create the cluster discretization
cluster_discretization = sample.discretization(input_samples, output_samples)

# Calculate the gradient vectors at each of the 16 centers for each of the
# QoI maps
if fd_scheme.upper() in ['RBF']:
    center_discretization = grad.calculate_gradients_rbf(
        cluster_discretization, normalize=False)
elif fd_scheme.upper() in ['FFD']:
    center_discretization = grad.calculate_gradients_ffd(
        cluster_discretization)
else:
    center_discretization = grad.calculate_gradients_cfd(
        cluster_discretization)

input_samples_centers = center_discretization.get_input_sample_set()

# Choose a specific set of QoIs to check the average skewness of
index1 = 0
index2 = 4
(specific_skewness, _) = cqoi.calculate_avg_skewness(input_samples_centers,
                                                     qoi_set=[index1, index2])
if comm.rank == 0:
    print('The average skewness of the QoI map defined by indices ' +
          str(index1) + ' and ' + str(index2) + ' is ' +
          str(specific_skewness))
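The excerpt above assumes earlier setup that imports the BET modules, loads a .mat file of precomputed samples and model output, selects a finite-difference scheme, and creates the empty sample sets. A possible preamble, with the file name, dimensions, and scheme string as illustrative assumptions:

import scipy.io as sio
import bet.sample as sample
import bet.sensitivity.gradients as grad
import bet.sensitivity.chooseQoIs as cqoi
from bet.Comm import comm

# Hypothetical data file and sizes; substitute your own model output.
matfile = sio.loadmat('heatplate_2d_16clusters_1000qoi.mat')
input_dim = 2        # number of model parameters
output_dim = 1000    # number of quantities of interest

# Which finite-difference scheme to use: 'RBF', 'FFD', or 'CFD'.
fd_scheme = 'FFD'

# Empty sample sets to be filled from the imported file.
input_samples = sample.sample_set(input_dim)
output_samples = sample.sample_set(output_dim)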
Example #9
# Set the input sample values from the imported file
input_samples.set_values(matfile['samples'])

# Set the data from the imported file
output_samples.set_values(matfile['data'])

# Create the cluster discretization
cluster_discretization = sample.discretization(input_samples, output_samples)

# Calculate the gradient vectors at each of the 16 centers for each of the
# QoI maps
if fd_scheme.upper() in ['RBF']:
    center_discretization = grad.calculate_gradients_rbf(cluster_discretization,
        normalize=False)
elif fd_scheme.upper() in ['FFD']:
    center_discretization = grad.calculate_gradients_ffd(cluster_discretization)
else:
    center_discretization = grad.calculate_gradients_cfd(cluster_discretization)

input_samples_centers = center_discretization.get_input_sample_set()

# Choose a specific set of QoIs to check the average skewness of
index1 = 0
index2 = 4
(specific_skewness, _) = cqoi.calculate_avg_skewness(input_samples_centers,
        qoi_set=[index1, index2])
if comm.rank == 0:
    print('The average skewness of the QoI map defined by indices ' +
          str(index1) + ' and ' + str(index2) + ' is ' +
          str(specific_skewness))

# Compute the skewness for each of the possible QoI maps determined by choosing