Example #1
def calculate_gradients_cfd(samples, data, normalize=True):
    """
    Approximate gradient vectors at ``num_centers`` (``centers.shape[0]``)
    points in the parameter space for each QoI map.  THIS METHOD IS DEPENDENT
    ON USING :meth:`~bet.sensitivity.pick_cfd_points` TO CHOOSE SAMPLES FOR
    THE CFD STENCIL AROUND EACH CENTER.  THE ORDERING MATTERS.

    :param samples: Samples for which the model has been solved.
    :type samples: :class:`np.ndarray` of shape
        ((2*Lambda_dim + 1)*num_centers, Lambda_dim)
    :param data: QoI values corresponding to each sample.
    :type data: :class:`np.ndarray` of shape (num_samples, Data_dim)
    :param boolean normalize: If normalize is True, normalize each gradient
        vector

    :rtype: :class:`np.ndarray` of shape (num_centers, Data_dim, Lambda_dim)
    :returns: Tensor representation of the gradient vectors of each
        QoI map at each point in centers

    """
    num_model_samples = samples.shape[0]
    Lambda_dim = samples.shape[1]
    num_centers = num_model_samples // (2*Lambda_dim + 1)

    # Find rvec from the first cluster of samples
    rvec = samples[num_centers:num_centers + Lambda_dim, :] - samples[0, :]
    rvec = util.fix_dimensions_vector_2darray(rvec.diagonal())

    # Clean the data
    data = util.fix_dimensions_vector_2darray(util.clean_data(
        data[num_centers:]))
    num_qois = data.shape[1]
    gradient_tensor = np.zeros([num_centers, num_qois, Lambda_dim])

    rvec = np.tile(np.repeat(rvec, num_qois, axis=1), [num_centers, 1])

    # Construct indices for the CFD gradient approximation
    inds = np.repeat(range(0, 2 * Lambda_dim * num_centers, 2 * Lambda_dim),
        Lambda_dim) + np.tile(range(0, Lambda_dim), num_centers)
    inds = np.array([inds, inds+Lambda_dim]).transpose()

    gradient_mat = (data[inds[:, 0]] - data[inds[:, 1]]) * (0.5 / rvec)

    # Reshape and organize
    gradient_tensor = np.reshape(gradient_mat.transpose(), [num_qois,
        Lambda_dim, num_centers], order='F').transpose(2, 0, 1)

    if normalize:
        # Compute the norm of each vector
        norm_gradient_tensor = np.linalg.norm(gradient_tensor, axis=2)

        # If it is a zero vector (has 0 norm), set norm=1, avoid divide by zero
        norm_gradient_tensor[norm_gradient_tensor == 0] = 1.0

        # Normalize each gradient vector
        gradient_tensor = gradient_tensor/np.tile(norm_gradient_tensor,
            (Lambda_dim, 1, 1)).transpose(1, 2, 0)

    return gradient_tensor
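
A minimal, self-contained NumPy sketch of the stencil layout this function assumes and of the central-difference arithmetic it performs. All names (A, r, and so on) are made up for illustration, the stencil is built by hand instead of calling pick_cfd_points, and the arithmetic is inlined rather than calling the function above, so it runs without the bet.util helpers:

import numpy as np

# Hypothetical linear QoI map Q(lam) = lam @ A.T, so row j of A is the exact
# gradient of QoI j.
Lambda_dim, num_qois, num_centers = 3, 2, 4
rng = np.random.default_rng(0)
A = rng.standard_normal((num_qois, Lambda_dim))
centers = rng.uniform(size=(num_centers, Lambda_dim))
r = 1e-4  # stencil radius in every coordinate direction

# Layout consistent with the code above: all centers first, then for each
# center its Lambda_dim forward points followed by its Lambda_dim backward
# points.
blocks = [centers]
for c in centers:
    blocks.append(c + r * np.eye(Lambda_dim))
    blocks.append(c - r * np.eye(Lambda_dim))
samples = np.vstack(blocks)   # shape ((2*Lambda_dim + 1)*num_centers, Lambda_dim)
data = samples @ A.T          # model evaluations, shape (num_samples, num_qois)

# The same central differences the function computes.
stencil_data = data[num_centers:].reshape(num_centers, 2, Lambda_dim, num_qois)
fwd, bwd = stencil_data[:, 0], stencil_data[:, 1]
grad = ((fwd - bwd) / (2 * r)).transpose(0, 2, 1)   # (num_centers, num_qois, Lambda_dim)

# For a linear map the central differences are exact up to round-off.
assert np.allclose(grad, np.broadcast_to(A, grad.shape))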
Example #2
def calculate_gradients_cfd(samples, data, normalize=True):
    """
    Approximate gradient vectors at ``num_centers`` (``centers.shape[0]``)
    points in the parameter space for each QoI map.  THIS METHOD IS DEPENDENT
    ON USING :meth:`~bet.sensitivity.pick_cfd_points` TO CHOOSE SAMPLES FOR
    THE CFD STENCIL AROUND EACH CENTER.  THE ORDERING MATTERS.
    :param samples: Samples for which the model has been solved.
    :type samples: :class:`np.ndarray` of shape
        ((2*Lambda_dim + 1)*num_centers, Lambda_dim)
    :param data: QoI values corresponding to each sample.
    :type data: :class:`np.ndarray` of shape (num_samples, Data_dim)
    :param boolean normalize:  If normalize is True, normalize each gradient
        vector
    :rtype: :class:`np.ndarray` of shape (num_centers, Data_dim, Lambda_dim)
    :returns: Tensor representation of the gradient vectors of each
        QoI map at each point in centers
    """
    num_model_samples = samples.shape[0]
    Lambda_dim = samples.shape[1]
    num_centers = num_model_samples // (2*Lambda_dim + 1)

    # Find rvec from the first cluster of samples
    rvec = samples[num_centers:num_centers + Lambda_dim, :] - samples[0, :]
    rvec = util.fix_dimensions_vector_2darray(rvec.diagonal())

    # Clean the data
    data = util.fix_dimensions_vector_2darray(util.clean_data(
        data[num_centers:]))
    num_qois = data.shape[1]
    gradient_tensor = np.zeros([num_centers, num_qois, Lambda_dim])

    rvec = np.tile(np.repeat(rvec, num_qois, axis=1), [num_centers, 1])

    # Construct indices for the CFD gradient approximation
    inds = np.repeat(range(0, 2 * Lambda_dim * num_centers, 2 * Lambda_dim),
        Lambda_dim) + np.tile(range(0, Lambda_dim), num_centers)
    inds = np.array([inds, inds+Lambda_dim]).transpose()

    gradient_mat = (data[inds[:, 0]] - data[inds[:, 1]]) * (0.5 / rvec)

    # Reshape and organize
    gradient_tensor = np.reshape(gradient_mat.transpose(), [num_qois,
        Lambda_dim, num_centers], order='F').transpose(2, 0, 1)

    if normalize:
        # Compute the norm of each vector
        norm_gradient_tensor = np.linalg.norm(gradient_tensor, ord=1, axis=2)

        # If it is a zero vector (has 0 norm), set norm=1, avoid divide by zero
        norm_gradient_tensor[norm_gradient_tensor == 0] = 1.0

        # Normalize each gradient vector
        gradient_tensor = gradient_tensor/np.tile(norm_gradient_tensor,
            (Lambda_dim, 1, 1)).transpose(1, 2, 0)

    return gradient_tensor
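
The only substantive change from Example #1 is that normalization here uses the l1 norm (``ord=1``) rather than the default l2 norm. A small illustration of the difference (the array ``g`` is made up):

import numpy as np

g = np.array([[3.0, 4.0]])   # one gradient vector: l2 norm 5, l1 norm 7

print(g / np.linalg.norm(g, axis=1, keepdims=True))          # approx [[0.6, 0.8]]   (Example #1)
print(g / np.linalg.norm(g, ord=1, axis=1, keepdims=True))   # approx [[0.429, 0.571]]  (this example)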
Example #3
def calculate_gradients_rbf(samples, data, centers=None, num_neighbors=None,
        RBF=None, ep=None, normalize=True):
    r"""
    Approximate gradient vectors at ``num_centers`` (``centers.shape[0]``)
    points in the parameter space for each QoI map using a radial basis
    function interpolation method.
    :param samples: Samples for which the model has been solved.
    :type samples: :class:`np.ndarray` of shape (num_samples, Lambda_dim)
    :param data: QoI values corresponding to each sample.
    :type data: :class:`np.ndarray` of shape (num_samples, Data_dim)
    :param centers: Points in :math:`\Lambda` at which to approximate gradient
        information.
    :type centers: :class:`np.ndarray` of shape (num_exval, Lambda_dim)
    :param int num_neighbors: Number of nearest neighbors to use in gradient
        approximation. Default value is Lambda_dim + 2.
    :param string RBF: Choice of radial basis function. Default is Gaussian
    :param float ep: Choice of shape parameter for radial basis function.
        Default value is 1.0
    :param boolean normalize:  If normalize is True, normalize each gradient
        vector
    :rtype: :class:`np.ndarray` of shape (num_centers, Data_dim, Lambda_dim)
    :returns: Tensor representation of the gradient vectors of each
        QoI map at each point in centers
    """
    data = util.fix_dimensions_vector_2darray(util.clean_data(data))
    Lambda_dim = samples.shape[1]
    num_model_samples = samples.shape[0]
    Data_dim = data.shape[1]

    if num_neighbors is None:
        num_neighbors = Lambda_dim + 2
    if ep is None:
        ep = 1.0
    if RBF is None:
        RBF = 'Gaussian'

    # If centers is None we assume the user chose clusters of size
    # Lambda_dim + 2
    if centers is None:
        num_centers = num_model_samples // (Lambda_dim + 2)
        centers = samples[:num_centers]
    else:
        num_centers = centers.shape[0]

    rbf_tensor = np.zeros([num_centers, num_model_samples, Lambda_dim])
    gradient_tensor = np.zeros([num_centers, Data_dim, Lambda_dim])
    tree = spatial.KDTree(samples)

    # For each center, interpolate the data using the chosen rbf and
    # then evaluate the partial derivative of that rbf at the desired point.
    for c in range(num_centers):
        # Find the k nearest neighbors and their distances to centers[c,:]
        [r, nearest] = tree.query(centers[c, :], k=num_neighbors)
        r = np.tile(r, (Lambda_dim, 1))

        # Compute the componentwise differences to each of the nearest neighbors
        diffVec = (centers[c, :] - samples[nearest, :]).transpose()

        # Compute the l2 distances between pairs of nearest neighbors
        distMat = spatial.distance_matrix(
            samples[nearest, :], samples[nearest, :])

        # Solve for the rbf weights using interpolation conditions and
        # evaluate the partial derivatives
        rbf_mat_values = \
            np.linalg.solve(radial_basis_function(distMat, RBF),
            radial_basis_function_dxi(r, diffVec, RBF, ep) \
            .transpose()).transpose()

        # Construct the finite difference matrices
        rbf_tensor[c, nearest, :] = rbf_mat_values.transpose()

    gradient_tensor = rbf_tensor.transpose(2, 0, 1).dot(data).transpose(1, 2, 0)

    if normalize:
        # Compute the norm of each vector
        norm_gradient_tensor = np.linalg.norm(gradient_tensor, ord=1, axis=2)

        # If it is a zero vector (has 0 norm), set norm=1, avoid divide by zero
        norm_gradient_tensor[norm_gradient_tensor == 0] = 1.0

        # Normalize each gradient vector
        gradient_tensor = gradient_tensor/np.tile(norm_gradient_tensor,
            (Lambda_dim, 1, 1)).transpose(1, 2, 0)

    return gradient_tensor
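
The helpers ``radial_basis_function`` and ``radial_basis_function_dxi`` are not shown here, so the following standalone sketch re-implements the idea for the default Gaussian RBF: solve the interpolation conditions for the RBF weights, then differentiate the interpolant at the center. All names and values (gaussian_rbf, A, center, ep, ...) are illustrative assumptions, not the BET API:

import numpy as np
from scipy import spatial

def gaussian_rbf(r, ep=1.0):
    # Gaussian radial basis function exp(-(ep*r)^2)
    return np.exp(-(ep * r) ** 2)

def gaussian_rbf_dxi(diff, r, ep=1.0):
    # Partial derivative of exp(-(ep*r)^2) w.r.t. x_i, where diff = x_i - x_{j,i}
    return -2.0 * ep ** 2 * diff * np.exp(-(ep * r) ** 2)

rng = np.random.default_rng(1)
Lambda_dim = 2
A = np.array([[2.0, -1.0]])                      # linear QoI map Q(lam) = lam @ A.T
center = np.array([0.5, 0.5])
neighbors = center + 0.05 * rng.standard_normal((Lambda_dim + 2, Lambda_dim))
data = neighbors @ A.T                           # (num_neighbors, 1)

# Interpolation conditions: Phi w = data, with Phi[j, k] = phi(||x_j - x_k||).
Phi = gaussian_rbf(spatial.distance_matrix(neighbors, neighbors))
w = np.linalg.solve(Phi, data)                   # RBF weights

# Gradient of the interpolant evaluated at the center.
diff = center - neighbors                        # (num_neighbors, Lambda_dim)
r = np.linalg.norm(diff, axis=1, keepdims=True)  # distances to the center
grad = gaussian_rbf_dxi(diff, r).T @ w           # (Lambda_dim, 1)

# The interpolant is not exact, but for points clustered near the center the
# estimate should be close to the true gradient A[0] = [2, -1].
print(grad.ravel())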
Example #4
def calculate_gradients_cfd(cluster_discretization, normalize=True):
    """
    Approximate gradient vectors at ``num_centers`` (``centers.shape[0]``)
    points in the parameter space for each QoI map.  THIS METHOD IS DEPENDENT
    ON USING :meth:`~bet.sensitivity.pick_cfd_points` TO CHOOSE SAMPLES FOR
    THE CFD STENCIL AROUND EACH CENTER.  THE ORDERING MATTERS.
    
    :param cluster_discretization: Must contain input and output values for the
        sample clusters.
    :type cluster_discretization: :class:`~bet.sample.discretization`
    :param boolean normalize:  If normalize is True, normalize each gradient
        vector
    
    :rtype: :class:`~bet.sample.discretization`
    :returns: A new :class:`~bet.sample.discretization` that contains only the
        centers of the clusters and their associated ``_jacobians``, a tensor
        representation of the gradient vectors of each QoI map at each center,
        stored as a :class:`numpy.ndarray` of shape (num_centers, output_dim,
        input_dim)
    
    """
    if cluster_discretization._input_sample_set.get_values() is None \
            or cluster_discretization._output_sample_set.get_values() is None:
        raise ValueError("You must have values to use this method.")
    samples = cluster_discretization._input_sample_set.get_values()
    data = cluster_discretization._output_sample_set.get_values()

    input_dim = cluster_discretization._input_sample_set.get_dim()
    output_dim = cluster_discretization._output_sample_set.get_dim()
    num_model_samples = cluster_discretization.check_nums()

    num_centers = num_model_samples // (2*input_dim + 1)

    # Find radii_vec from the first cluster of samples
    radii_vec = samples[num_centers:num_centers + input_dim, :] - samples[0, :]
    radii_vec = util.fix_dimensions_vector_2darray(radii_vec.diagonal())

    # Clean the data
    data = util.clean_data(data[num_centers:])
    gradient_tensor = np.zeros([num_centers, output_dim, input_dim])

    radii_vec = np.tile(np.repeat(radii_vec, output_dim, axis=1), [num_centers,
        1])

    # Construct indices for the CFD gradient approximation
    inds = np.repeat(range(0, 2 * input_dim * num_centers, 2 * input_dim),
        input_dim) + np.tile(range(0, input_dim), num_centers)
    inds = np.array([inds, inds+input_dim]).transpose()

    gradient_mat = (data[inds[:, 0]] - data[inds[:, 1]]) * (0.5 / radii_vec)

    # Reshape and organize
    gradient_tensor = np.reshape(gradient_mat.transpose(), [output_dim,
        input_dim, num_centers], order='F').transpose(2, 0, 1)

    if normalize:
        # Compute the norm of each vector
        norm_gradient_tensor = np.linalg.norm(gradient_tensor, ord=1, axis=2)

        # If it is a zero vector (has 0 norm), set norm=1, avoid divide by zero
        norm_gradient_tensor[norm_gradient_tensor == 0] = 1.0

        # Normalize each gradient vector
        gradient_tensor = gradient_tensor/np.tile(norm_gradient_tensor,
            (input_dim, 1, 1)).transpose(1, 2, 0)

    center_input_sample_set = sample.sample_set(input_dim)
    center_input_sample_set.set_values(samples[:num_centers, :])
    if cluster_discretization._input_sample_set.get_domain() is not None:
        center_input_sample_set.set_domain(cluster_discretization.\
                _input_sample_set.get_domain())
    center_input_sample_set.set_jacobians(gradient_tensor)
    center_output_sample_set = sample.sample_set(output_dim)
    center_output_sample_set.set_values(data[:num_centers, :])
    if cluster_discretization._output_sample_set.get_domain() is not None:
        center_output_sample_set.set_domain(cluster_discretization.\
                _output_sample_set.get_domain())
    #center_output_sample_set.set_jacobians(gradient_tensor.transpose())
    center_discretization = sample.discretization(center_input_sample_set,
            center_output_sample_set)
    return center_discretization
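
A hedged usage sketch for this discretization-based variant, assuming the BET package is installed, that ``sample`` refers to bet.sample as in the snippet above, and that this function is importable from a gradients module (the import path below is an assumption and may differ between BET versions). The stencil is built by hand here; in practice pick_cfd_points is meant to choose these samples for you:

import numpy as np
import bet.sample as sample
import bet.sensitivity.gradients as grad   # assumed location of calculate_gradients_cfd

input_dim, num_centers, radius = 2, 10, 1e-3
rng = np.random.default_rng(0)
centers = rng.uniform(size=(num_centers, input_dim))

# CFD stencil in the layout the code above reads: all centers first, then for
# each center its input_dim forward points followed by its input_dim backward
# points.
blocks = [centers]
for c in centers:
    blocks.append(c + radius * np.eye(input_dim))
    blocks.append(c - radius * np.eye(input_dim))
samples = np.vstack(blocks)

# Model evaluations; here a made-up linear QoI map lam -> lam @ A.T.
A = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
data = samples @ A.T

input_set = sample.sample_set(input_dim)
input_set.set_values(samples)
output_set = sample.sample_set(A.shape[0])
output_set.set_values(data)
cluster_disc = sample.discretization(input_set, output_set)

center_disc = grad.calculate_gradients_cfd(cluster_disc, normalize=False)

# The Jacobians live on the input sample set of the returned discretization,
# shape (num_centers, output_dim, input_dim); with normalize=False each one
# should essentially reproduce A for this linear map.
jacobians = center_disc._input_sample_set._jacobians
print(jacobians.shape)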
Example #5
def calculate_gradients_cfd(cluster_discretization, normalize=True):
    """
    Approximate gradient vectors at ``num_centers`` (``centers.shape[0]``)
    points in the parameter space for each QoI map.  THIS METHOD IS DEPENDENT
    ON USING :meth:`~bet.sensitivity.pick_cfd_points` TO CHOOSE SAMPLES FOR
    THE CFD STENCIL AROUND EACH CENTER.  THE ORDERING MATTERS.
    
    :param cluster_discretization: Must contain input and output values for the
        sample clusters.
    :type cluster_discretization: :class:`~bet.sample.discretization`
    :param boolean normalize:  If normalize is True, normalize each gradient
        vector
    
    :rtype: :class:`~bet.sample.discretization`
    :returns: A new :class:`~bet.sample.discretization` that contains only the
        centers of the clusters and their associated ``_jacobians``, a tensor
        representation of the gradient vectors of each QoI map at each center,
        stored as a :class:`numpy.ndarray` of shape (num_centers, output_dim,
        input_dim)
    
    """
    if cluster_discretization._input_sample_set.get_values() is None \
            or cluster_discretization._output_sample_set.get_values() is None:
        raise ValueError("You must have values to use this method.")
    samples = cluster_discretization._input_sample_set.get_values()
    data = cluster_discretization._output_sample_set.get_values()

    input_dim = cluster_discretization._input_sample_set.get_dim()
    output_dim = cluster_discretization._output_sample_set.get_dim()
    num_model_samples = cluster_discretization.check_nums()

    num_centers = num_model_samples // (2 * input_dim + 1)

    # Find radii_vec from the first cluster of samples
    radii_vec = samples[num_centers:num_centers + input_dim, :] - samples[0, :]
    radii_vec = util.fix_dimensions_vector_2darray(radii_vec.diagonal())

    # Clean the data
    data = util.clean_data(data[num_centers:])
    gradient_tensor = np.zeros([num_centers, output_dim, input_dim])

    radii_vec = np.tile(np.repeat(radii_vec, output_dim, axis=1),
                        [num_centers, 1])

    # Construct indices for the CFD gradient approximation
    inds = np.repeat(range(0, 2 * input_dim * num_centers, 2 * input_dim),
                     input_dim) + np.tile(range(0, input_dim), num_centers)
    inds = np.array([inds, inds + input_dim]).transpose()

    gradient_mat = (data[inds[:, 0]] - data[inds[:, 1]]) * (0.5 / radii_vec)

    # Reshape and organize
    gradient_tensor = np.reshape(gradient_mat.transpose(),
                                 [output_dim, input_dim, num_centers],
                                 order='F').transpose(2, 0, 1)

    if normalize:
        # Compute the norm of each vector
        norm_gradient_tensor = np.linalg.norm(gradient_tensor, ord=1, axis=2)

        # If it is a zero vector (has 0 norm), set norm=1, avoid divide by zero
        norm_gradient_tensor[norm_gradient_tensor == 0] = 1.0

        # Normalize each gradient vector
        gradient_tensor = gradient_tensor / np.tile(
            norm_gradient_tensor, (input_dim, 1, 1)).transpose(1, 2, 0)

    center_input_sample_set = sample.sample_set(input_dim)
    center_input_sample_set.set_values(samples[:num_centers, :])
    if cluster_discretization._input_sample_set.get_domain() is not None:
        center_input_sample_set.set_domain(cluster_discretization.\
                _input_sample_set.get_domain())
    center_input_sample_set.set_jacobians(gradient_tensor)
    center_output_sample_set = sample.sample_set(output_dim)
    center_output_sample_set.set_values(data[:num_centers, :])
    if cluster_discretization._output_sample_set.get_domain() is not None:
        center_output_sample_set.set_domain(cluster_discretization.\
                _output_sample_set.get_domain())
    #center_output_sample_set.set_jacobians(gradient_tensor.transpose())
    center_discretization = sample.discretization(center_input_sample_set,
                                                  center_output_sample_set)
    return center_discretization
Example #6
def calculate_gradients_rbf(samples, data, centers=None, num_neighbors=None,
        RBF=None, ep=None, normalize=True):
    r"""
    Approximate gradient vectors at ``num_centers`` (``centers.shape[0]``)
    points in the parameter space for each QoI map using a radial basis
    function interpolation method.
    :param samples: Samples for which the model has been solved.
    :type samples: :class:`np.ndarray` of shape (num_samples, Lambda_dim)
    :param data: QoI values corresponding to each sample.
    :type data: :class:`np.ndarray` of shape (num_samples, Data_dim)
    :param centers: Points in :math:`\Lambda` at which to approximate gradient
        information.
    :type centers: :class:`np.ndarray` of shape (num_exval, Lambda_dim)
    :param int num_neighbors: Number of nearest neighbors to use in gradient
        approximation. Default value is Lambda_dim + 2.
    :param string RBF: Choice of radial basis function. Default is Gaussian
    :param float ep: Choice of shape parameter for radial basis function.
        Default value is 1.0
    :param boolean normalize:  If normalize is True, normalize each gradient
        vector
    :rtype: :class:`np.ndarray` of shape (num_centers, Data_dim, Lambda_dim)
    :returns: Tensor representation of the gradient vectors of each
        QoI map at each point in centers
    """
    data = util.fix_dimensions_vector_2darray(util.clean_data(data))
    Lambda_dim = samples.shape[1]
    num_model_samples = samples.shape[0]
    Data_dim = data.shape[1]

    if num_neighbors is None:
        num_neighbors = Lambda_dim + 2
    if ep is None:
        ep = 1.0
    if RBF is None:
        RBF = 'Gaussian'

    # If centers is None we assume the user chose clusters of size
    # Lambda_dim + 2
    if centers is None:
        num_centers = num_model_samples // (Lambda_dim + 2)
        centers = samples[:num_centers]
    else:
        num_centers = centers.shape[0]

    rbf_tensor = np.zeros([num_centers, num_model_samples, Lambda_dim])
    gradient_tensor = np.zeros([num_centers, Data_dim, Lambda_dim])
    tree = spatial.KDTree(samples)

    # For each center, interpolate the data using the chosen rbf and
    # then evaluate the partial derivative of that rbf at the desired point.
    for c in range(num_centers):
        # Find the k nearest neighbors and their distances to centers[c,:]
        [r, nearest] = tree.query(centers[c, :], k=num_neighbors)
        r = np.tile(r, (Lambda_dim, 1))

        # Compute the componentwise differences to each of the nearest neighbors
        diffVec = (centers[c, :] - samples[nearest, :]).transpose()

        # Compute the l2 distances between pairs of nearest neighbors
        distMat = spatial.distance_matrix(
            samples[nearest, :], samples[nearest, :])

        # Solve for the rbf weights using interpolation conditions and
        # evaluate the partial derivatives
        rbf_mat_values = \
            np.linalg.solve(radial_basis_function(distMat, RBF),
            radial_basis_function_dxi(r, diffVec, RBF, ep) \
            .transpose()).transpose()

        # Construct the finite difference matrices
        rbf_tensor[c, nearest, :] = rbf_mat_values.transpose()

    gradient_tensor = rbf_tensor.transpose(2, 0, 1).dot(data).transpose(1, 2, 0)

    if normalize:
        # Compute the norm of each vector
        norm_gradient_tensor = np.linalg.norm(gradient_tensor, ord=1, axis=2)

        # If it is a zero vector (has 0 norm), set norm=1, avoid divide by zero
        norm_gradient_tensor[norm_gradient_tensor == 0] = 1.0

        # Normalize each gradient vector
        gradient_tensor = gradient_tensor/np.tile(norm_gradient_tensor,
            (Lambda_dim, 1, 1)).transpose(1, 2, 0)

    return gradient_tensor