Example 1
def optimum_bimodal_spd(spd_manifold):
    """
    This function returns the global minimum (x, f(x)) of the bimodal distribution on the SPD manifold.
    Note: the means and covariances of the two Gaussians can be modified in the function "get_bimodal_parameters".

    Parameters
    ----------
    :param spd_manifold: n-dimensional SPD manifold             (pymanopt manifold)

    Returns
    -------
    :return opt_x: location of the global minimum of the bimodal distribution on the SPD manifold
    :return opt_y: value of the global minimum of the bimodal distribution on the SPD manifold
    """
    # Function parameters
    mu1, mu2, sigma1, sigma2 = get_bimodal_parameters(spd_manifold)

    # Test both means
    test_val1 = bimodal_function_spd(
        torch.tensor(symmetric_matrix_to_vector_mandel(mu1)),
        spd_manifold).numpy()
    test_val2 = bimodal_function_spd(
        torch.tensor(symmetric_matrix_to_vector_mandel(mu2)),
        spd_manifold).numpy()

    # Optimum x and y
    if test_val1 < test_val2:
        opt_x = mu1
        opt_y = test_val1
    else:
        opt_x = mu2
        opt_y = test_val2

    return opt_x, opt_y
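For context, a minimal usage sketch (assuming pymanopt's PositiveDefinite manifold, as instantiated in the later examples, and that the helper functions shown in this listing are importable):

import pymanopt.manifolds as pyman_man

# Hypothetical usage: query the optimum of the bimodal test function on a 2x2 SPD manifold
spd_manifold = pyman_man.PositiveDefinite(2)
opt_x, opt_y = optimum_bimodal_spd(spd_manifold)
print('Optimum location (SPD matrix):\n', opt_x)
print('Optimum value:', opt_y)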
Example 2
def bimodal_function_spd(x, spd_manifold):
    """
    This function computes a bimodal Gaussian distribution on the SPD manifold.
    Each Gaussian is defined on the tangent space of its mean and projected to the manifold via the exponential map.
    The value of the function is therefore computed by projecting the point on the manifold to the tangent space of
    each mean, computing the value of each Gaussian in the corresponding Euclidean space, and summing the two values.
    To be used for BO, the input x is a torch tensor given in Mandel notation and the function outputs a [1, 1]
    torch tensor.

    Note: the means and covariances of the two Gaussians can be modified in the function "get_bimodal_parameters".

    Parameters
    ----------
    :param x: point on the SPD manifold in Mandel notation      (torch tensor)
    :param spd_manifold: n-dimensional SPD manifold             (pymanopt manifold)

    Returns
    -------
    :return: value of the bimodal distribution at x             (torch [1, 1] tensor)
    """
    # Dimension
    dimension = spd_manifold._n

    # Mandel notation dimension
    vector_dimension = int(dimension + dimension * (dimension - 1) / 2)

    # Data to numpy
    torch_type = x.dtype
    x = x.detach().numpy()

    if np.ndim(x) < 2:
        x = x[None]

    # From Mandel vector to symmetric matrix
    x = vector_to_symmetric_matrix_mandel(x[0])

    # Function parameters
    mu1, mu2, sigma1, sigma2 = get_bimodal_parameters(spd_manifold)
    # Precompute the inverse and determinant of each covariance
    inv_sigma1 = np.linalg.inv(sigma1)
    det_sigma1 = np.linalg.det(sigma1)
    inv_sigma2 = np.linalg.inv(sigma2)
    det_sigma2 = np.linalg.det(sigma2)

    # Negated density of each Gaussian (negated so that the modes become minima)
    # Gaussian 1
    x_proj1 = symmetric_matrix_to_vector_mandel(spd_manifold.log(mu1, x))
    prob1 = -np.exp(- 0.5 * np.dot(x_proj1, np.dot(inv_sigma1, x_proj1.T))) / \
            np.sqrt((2 * np.pi) ** vector_dimension * det_sigma1)
    # Gaussian 2
    x_proj2 = symmetric_matrix_to_vector_mandel(spd_manifold.log(mu2, x))
    prob2 = -np.exp(- 0.5 * np.dot(x_proj2, np.dot(inv_sigma2, x_proj2.T))) / \
            np.sqrt((2 * np.pi) ** vector_dimension * det_sigma2)

    # Function value
    y = prob1 + prob2

    return torch.tensor(y[None, None], dtype=torch_type)
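The Mandel notation used throughout these examples maps an n x n symmetric matrix to a vector of length n + n(n-1)/2 by stacking the diagonal entries and the off-diagonal entries scaled by sqrt(2), which is why the functions below divide the off-diagonal terms by sqrt(2) to recover the original elements. The repo's symmetric_matrix_to_vector_mandel / vector_to_symmetric_matrix_mandel are not shown in this listing; a minimal sketch of such a pair, which may differ from the actual implementation in element ordering:

import numpy as np

def mandel_vectorize_sketch(S):
    # Diagonal entries first, then the upper-triangular entries scaled by sqrt(2)
    n = S.shape[0]
    rows, cols = np.triu_indices(n, k=1)
    return np.concatenate([np.diag(S), np.sqrt(2.) * S[rows, cols]])

def mandel_unvectorize_sketch(v, n):
    # Inverse map: off-diagonal entries are divided back by sqrt(2)
    S = np.diag(v[:n])
    rows, cols = np.triu_indices(n, k=1)
    S[rows, cols] = v[n:] / np.sqrt(2.)
    S[cols, rows] = S[rows, cols]
    return S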
Example 3
def optimum_product_of_sines_spd(spd_manifold):
    """
    This function returns the global minimum (x, f(x)) of the Product of sines function on the SPD manifold.
    Note: the base point is obtained from, and can be modified in, the function "get_product_of_sines_base".

    Parameters
    ----------
    :param spd_manifold: n-dimensional SPD manifold          (pymanopt manifold)

    Returns
    -------
    :return opt_x: location of the global minimum of the Product of sines function on the SPD manifold
    :return opt_y: value of the global minimum of the Product of sines function on the SPD manifold
    """
    # Dimension
    dimension = spd_manifold._n

    # Optimum x
    base = get_product_of_sines_base(spd_manifold)
    opt_x_log = np.pi / 2 * np.ones((dimension, dimension))
    opt_x_log[1, 1] = -np.pi / 2
    opt_x = spd_manifold.exp(base, opt_x_log)
    opt_x_vec = symmetric_matrix_to_vector_mandel(opt_x)[None]
    # Optimum y
    opt_y = product_of_sines_function_spd(torch.tensor(opt_x_vec),
                                          spd_manifold).numpy()

    return opt_x, opt_y
Example 4
def ackley_function_spd(x, spd_manifold):
    """
    This function computes the Ackley function on the SPD manifold.
    The Ackley function is defined on the tangent space of a base point and projected to the manifold via the
    exponential map. The value of the function is therefore computed by projecting the point on the manifold to
    the tangent space of the base point and by computing the value of the Ackley function in this Euclidean space.
    To be used for BO, the input x is a torch tensor given in Mandel notation and the function outputs a [1, 1]
    torch tensor.

    Note: the base point is obtained from, and can be modified in, the function "get_ackley_base".

    Parameters
    ----------
    :param x: point on the SPD manifold (in Mandel notation)    (torch tensor)
    :param spd_manifold: n-dimensional SPD manifold             (pymanopt manifold)

    Returns
    -------
    :return: value of the Ackley function at x                  (torch [1, 1] tensor)
    """
    # Dimension
    dimension = spd_manifold._n

    # Mandel notation dimension
    vector_dimension = int(dimension + dimension * (dimension - 1) / 2)

    # Data to numpy
    torch_type = x.dtype
    x = x.detach().numpy()

    if np.ndim(x) < 2:
        x = x[None]

    # From Mandel vector to symmetric matrix
    x = vector_to_symmetric_matrix_mandel(x[0])

    # Projection in tangent space of the base
    base = get_ackley_base(spd_manifold)
    x_proj = spd_manifold.log(base, x)

    # Vectorize so that each symmetric element appears only once
    # The off-diagonal terms are divided by sqrt(2) to recover the original matrix elements (Voigt-like instead of Mandel scaling)
    x_proj_vec = symmetric_matrix_to_vector_mandel(x_proj)
    x_proj_vec[dimension:] /= 2.**0.5

    # Ackley function parameters
    a = 20
    b = 0.2
    c = 2 * np.pi

    # Ackley function
    aexp_term = -a * np.exp(
        -b * np.sqrt(np.sum(x_proj_vec**2) / vector_dimension))
    expcos_term = -np.exp(np.sum(np.cos(c * x_proj_vec) / vector_dimension))
    y = aexp_term + expcos_term + a + np.exp(1.)

    return torch.tensor(y[None, None], dtype=torch_type)
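Since log(base, base) is the zero tangent vector, the Ackley value at the base point is its known global minimum of 0, which is what optimum_ackley_spd (Example 8 below) relies on. A hedged sanity check, assuming pymanopt and the repo helpers are importable:

import torch
import pymanopt.manifolds as pyman_man

# Hypothetical check: the Ackley value at the base point should be numerically zero
spd_manifold = pyman_man.PositiveDefinite(3)
base_vec = symmetric_matrix_to_vector_mandel(get_ackley_base(spd_manifold))[None]
value = ackley_function_spd(torch.tensor(base_vec), spd_manifold)
assert torch.allclose(value, torch.zeros_like(value), atol=1e-6)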
Example 5
def product_of_sines_function_spd(x, spd_manifold, coefficient=100.):
    """
    This function computes the Product of sines function on the SPD manifold.
    The Product of sines function is defined on the tangent space of a base point and projected to the manifold via the
    exponential map. The value of the function is therefore computed by projecting the point on the manifold to
    the tangent space of the base point and by computing the value of the function in this Euclidean space.
    To be used for BO, the input x is a torch tensor given in Mandel notation and the function outputs a [1, 1]
    torch tensor.

    Note: the base point is obtained from, and can be modified in, the function "get_product_of_sines_base".

    Parameters
    ----------
    :param x: point on the SPD manifold (in Mandel notation)    (torch tensor)
    :param spd_manifold: n-dimensional SPD manifold             (pymanopt manifold)

    Optional parameters
    -------------------
    :param coefficient: multiplying coefficient of the product of sines

    Returns
    -------
    :return: value of the Product of sines function at x                  (torch [1, 1] tensor)
    """
    # Dimension
    dimension = spd_manifold._n

    # Mandel notation dimension
    vector_dimension = int(dimension + dimension * (dimension - 1) / 2)

    # Data to numpy
    torch_type = x.dtype
    x = x.detach().numpy()

    if np.ndim(x) < 2:
        x = x[None]

    # From Mandel vector to symmetric matrix
    x = vector_to_symmetric_matrix_mandel(x[0])

    # Projection in tangent space of the base
    base = get_product_of_sines_base(spd_manifold)
    x_proj = spd_manifold.log(base, x)

    # Vectorize so that each symmetric element appears only once
    # The off-diagonal terms are divided by sqrt(2) to recover the original matrix elements (Voigt-like instead of Mandel scaling)
    x_proj_vec = symmetric_matrix_to_vector_mandel(x_proj)
    x_proj_vec[dimension:] /= 2.**0.5

    # Sines
    sin_x_proj_vec = np.sin(x_proj_vec)

    # Product of sines function
    y = coefficient * sin_x_proj_vec[0] * np.prod(sin_x_proj_vec)

    return torch.tensor(y[None, None], dtype=torch_type)
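At the optimum constructed in Example 3 every tangent-space coordinate is ±pi/2, so every sine is ±1 and, with the default coefficient of 100, the minimum value is -100. A hedged sanity check under the same assumptions as above:

import numpy as np
import pymanopt.manifolds as pyman_man

# Hypothetical check: the value at the optimum returned by Example 3 should be -coefficient
spd_manifold = pyman_man.PositiveDefinite(3)
opt_x, opt_y = optimum_product_of_sines_spd(spd_manifold)
assert np.allclose(opt_y, -100., atol=1e-6)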
Example 6
def rosenbrock_function_spd(x, spd_manifold):
    """
    This function computes the Rosenbrock function on the SPD manifold.
    The Rosenbrock function is defined on the tangent space of a base point and projected to the manifold via the
    exponential map. The value of the function is therefore computed by projecting the point on the manifold to
    the tangent space of the base point and by computing the value of the Rosenbrock function in this Euclidean space.
    To be used for BO, the input x is a torch tensor given in Mandel notation and the function outputs a [1, 1]
    torch tensor.

    Note: the base point is obtained from, and can be modified in, the function "get_rosenbrock_base".

    Parameters
    ----------
    :param x: point on the SPD manifold in Mandel notation      (torch tensor)
    :param spd_manifold: n-dimensional SPD manifold             (pymanopt manifold)

    Returns
    -------
    :return: value of the Rosenbrock function at x              (torch [1, 1] tensor)
    """
    # Dimension
    dimension = spd_manifold._n

    # Mandel notation dimension
    vector_dimension = int(dimension + dimension * (dimension - 1) / 2)

    # Data to numpy
    torch_type = x.dtype
    x = x.detach().numpy()
    if np.ndim(x) < 2:
        x = x[None]

    # From Mandel vector to symmetric matrix
    x = vector_to_symmetric_matrix_mandel(x[0])

    # Projection in tangent space of the base
    base = get_rosenbrock_base(spd_manifold)
    x_proj = spd_manifold.log(base, x)

    # Vectorize so that each symmetric element appears only once
    # The off-diagonal terms are divided by sqrt(2) to recover the original matrix elements (Voigt-like instead of Mandel scaling)
    x_proj_vec = symmetric_matrix_to_vector_mandel(x_proj)
    x_proj_vec[dimension:] /= 2.**0.5

    # Rosenbrock function
    y = 0
    for i in range(vector_dimension - 1):
        y += 100 * (x_proj_vec[i + 1] - x_proj_vec[i]**2)**2 + (
            1 - x_proj_vec[i])**2

    return torch.tensor(y[None, None], dtype=torch_type)
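The Euclidean Rosenbrock function attains its minimum of 0 at the all-ones vector, so on the manifold the optimum is the exponential map of the all-ones symmetric tangent matrix at the base point; this is presumably what optimum_rosenbrock_spd (referenced in Example 11) computes. A hedged sketch of that check, under the same assumptions as the earlier ones:

import numpy as np
import torch
import pymanopt.manifolds as pyman_man

# Hypothetical check: Rosenbrock value at exp(base, all-ones tangent matrix) should be ~0
spd_manifold = pyman_man.PositiveDefinite(3)
base = get_rosenbrock_base(spd_manifold)
opt_x = spd_manifold.exp(base, np.ones((3, 3)))
opt_vec = symmetric_matrix_to_vector_mandel(opt_x)[None]
value = rosenbrock_function_spd(torch.tensor(opt_vec), spd_manifold)
assert torch.allclose(value, torch.zeros_like(value), atol=1e-6)

Example 7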
def cholesky_embedded_function_wrapped(x_cholesky,
                                       low_dimensional_spd_manifold,
                                       spd_manifold, test_function):
    """
    This function is a wrapper for test functions on the SPD manifold whose inputs are given in the form of a Cholesky
    decomposition. The Cholesky input is transformed into the corresponding SPD matrix, which is then passed to the
    given test function.

    Parameters
    ----------
    :param x_cholesky: Cholesky decomposition of an SPD matrix
    :param low_dimensional_spd_manifold: d-dimensional SPD manifold     (pymanopt manifold)
    :param spd_manifold: D-dimensional SPD manifold                     (pymanopt manifold)
    :param test_function: embedded function on the low-dimensional SPD manifold to be tested

    Returns
    -------
    :return: value of the test function at x                            (torch [1, 1] tensor)
    """
    # Dimension
    dimension = spd_manifold._n

    # Data to numpy
    torch_type = x_cholesky.dtype
    x_cholesky = x_cholesky.detach().numpy()

    if np.ndim(x_cholesky) == 2:
        x_cholesky = x_cholesky[0]

    # If the Cholesky decomposition contains zero elements, shift all elements slightly away from zero
    if x_cholesky.size - np.count_nonzero(x_cholesky):
        x_cholesky += 1e-6

    # Also add a small value to Cholesky elements that are too close to zero
    x_cholesky[np.abs(x_cholesky) < 1e-10] += 1e-10

    # Reshape matrix
    indices = np.tril_indices(dimension)
    xL = np.zeros((dimension, dimension))
    xL[indices] = x_cholesky

    # Compute SPD from Cholesky
    x = np.dot(xL, xL.T)
    # Mandel notation
    x = symmetric_matrix_to_vector_mandel(x)
    # To torch
    x = torch.from_numpy(x).to(dtype=torch_type)

    # Test function
    return test_function(x, low_dimensional_spd_manifold)
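For reference, the round trip between the flattened Cholesky factor expected by this wrapper and the SPD matrix it reconstructs (a self-contained NumPy illustration with arbitrary example values):

import numpy as np

dim = 3
spd = np.array([[2.0, 0.5, 0.1],
                [0.5, 1.5, 0.3],
                [0.1, 0.3, 1.0]])

# SPD matrix -> flattened lower-triangular Cholesky factor (the wrapper's input format)
chol_flat = np.linalg.cholesky(spd)[np.tril_indices(dim)]

# Flattened factor -> SPD matrix (what the wrapper does internally)
xL = np.zeros((dim, dim))
xL[np.tril_indices(dim)] = chol_flat
assert np.allclose(xL @ xL.T, spd)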
Example 8
def optimum_ackley_spd(spd_manifold):
    """
    This function returns the global minimum (x, f(x)) of the Ackley function on the SPD manifold.
    Note: the base point is obtained from, and can be modified in, the function "get_ackley_base".

    Parameters
    ----------
    :param spd_manifold: n-dimensional SPD manifold          (pymanopt manifold)

    Returns
    -------
    :return opt_x: location of the global minimum of the Ackley function on the SPD manifold
    :return opt_y: value of the global minimum of the Ackley function on the SPD manifold
    """
    # Optimum x
    opt_x = get_ackley_base(spd_manifold)
    opt_x_vec = symmetric_matrix_to_vector_mandel(opt_x)[None]
    # Optimum y
    opt_y = ackley_function_spd(torch.tensor(opt_x_vec), spd_manifold).numpy()

    return opt_x, opt_y
Example 9
    dim_vec = int((dim * dim + dim) / 2)

    # Instantiate the manifold
    spd_manifold = pyman_man.PositiveDefinite(dim)

    # Update the random function of the manifold (the original one samples only eigenvalues between 1 and 2).
    # We need to specify the minimum and maximum eigenvalues of the random matrices.
    spd_manifold.rand = types.MethodType(spd_sample, spd_manifold)
    # Specify the domain
    min_eig = 0.001
    max_eig = 5.
    spd_manifold.min_eig = min_eig
    spd_manifold.max_eig = max_eig

    # Origin in the manifold
    origin_man = symmetric_matrix_to_vector_mandel(np.eye(dim))

    # Define the range of parameter for the kernel
    nb_params = 30
    if dim == 2:
        betas = np.logspace(-1, 2, nb_params)
    elif dim == 3:
        betas = np.logspace(-1.1, 1, nb_params)
    elif dim >= 5:
        betas = np.logspace(-1.5, 0.8, nb_params)

    min_eigval_trials = []

    for trial in range(nb_trials):
        print('Trial ', trial)
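spd_sample, which replaces the manifold's default rand method here and in the following snippets, is defined elsewhere in the repo. A minimal sketch of what such a sampler could look like (an assumption, not the repo's implementation): draw a random orthogonal basis and eigenvalues uniformly in [min_eig, max_eig].

import numpy as np

def spd_sample_sketch(self):
    # Hypothetical sampler: random SPD matrix with eigenvalues in [self.min_eig, self.max_eig]
    n = self._n
    q, _ = np.linalg.qr(np.random.randn(n, n))          # random orthogonal matrix
    eigvals = np.random.uniform(self.min_eig, self.max_eig, n)
    return q @ np.diag(eigvals) @ q.T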
Example 10
    else:
        disp_fig = False

    # Instantiate the manifold
    spd_manifold = pyman_man.PositiveDefinite(dim)

    # Update the random function of the manifold (the original one samples only eigenvalues between 1 and 2).
    # We need to specify the minimum and maximum eigenvalues of the random matrices. This is done when defining bounds.
    spd_manifold.rand = types.MethodType(spd_sample, spd_manifold)

    # Function to optimize
    test_function_chol = functools.partial(cholesky_function_wrapped, test_function=ackley_function_spd,
                                           spd_manifold=spd_manifold)
    # Optimum
    true_min, true_opt_val = optimum_ackley_spd(spd_manifold)
    true_min_vec = symmetric_matrix_to_vector_mandel(true_min)[None]
    true_min_chol = np.linalg.cholesky(true_min)
    true_min_chol = true_min_chol[np.tril_indices(dim)]

    # Plot test function with inputs on the sphere
    # 3D figure
    r_cone = 5.
    if disp_fig:
        fig = plt.figure(figsize=(5, 5))
        ax = Axes3D(fig)

        max_colors = bo_plot_function_spd(ax, test_function_chol, r_cone=r_cone, true_opt_x=true_min,
                                          true_opt_y=true_opt_val, alpha=0.3, n_elems=100, n_elems_h=10, chol=True)
        ax.set_title('True function', fontsize=20)
        plt.show()
    else:
Example 11
    # Define the test function
    # Parameters for the nested test function
    grassmann_manifold = pyman_man.Grassmann(dim, latent_dim)
    projection_matrix_test = torch.from_numpy(
        grassmann_manifold.rand()).double()

    # Define the nested test function
    test_function = functools.partial(
        projected_function_spd,
        low_dimensional_spd_manifold=latent_spd_manifold,
        test_function=rosenbrock_function_spd,
        projection_matrix=projection_matrix_test)
    # Optimum
    true_min, true_opt_val = optimum_projected_function_spd(
        optimum_rosenbrock_spd, latent_spd_manifold, projection_matrix_test)
    true_min_vec = symmetric_matrix_to_vector_mandel(true_min)[None]

    # Specify the optimization domain
    # Eigenvalue bounds
    min_eigenvalue = 1e-4
    max_eigenvalue = 5.
    # Manifolds eigenvalue bounds
    spd_manifold.min_eig = min_eigenvalue
    spd_manifold.max_eig = max_eigenvalue
    latent_spd_manifold.min_eig = min_eigenvalue
    latent_spd_manifold.max_eig = max_eigenvalue
    # Optimization domain
    lower_bound = torch.cat(
        (min_eigenvalue * torch.ones(dim, dtype=torch.float64),
         -max_eigenvalue / np.sqrt(2) *
         torch.ones(dim_vec - dim, dtype=torch.float64)))
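projected_function_spd and optimum_projected_function_spd belong to the repo's nested (high-dimensional) setting and are not shown in this listing. A plausible sketch of the mapping they rely on, assuming a D x D SPD matrix X is sent to the d-dimensional latent manifold through the Grassmann point W as W^T X W (the name and the exact convention are assumptions):

import numpy as np

def project_spd_to_latent_sketch(x_high, projection_matrix):
    # Assumed mapping: D x D SPD matrix -> d x d SPD matrix via W^T X W,
    # with W a D x d matrix with orthonormal columns (a point on the Grassmann manifold).
    # A congruence with a full-column-rank W preserves positive definiteness.
    W = np.asarray(projection_matrix)
    return W.T @ x_high @ W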
Example 12
        disp_fig = True
    else:
        disp_fig = False

    # Instantiate the manifold
    spd_manifold = pyman_man.PositiveDefinite(dim)

    # Update the random function of the manifold (the original one samples only eigenvalues between 1 and 2).
    # We need to specify the minimum and maximum eigenvalues of the random matrices. This is done when defining bounds.
    spd_manifold.rand = types.MethodType(spd_sample, spd_manifold)

    # Function to optimize
    test_function = functools.partial(ackley_function_spd, spd_manifold=spd_manifold)
    # Optimum
    true_min, true_opt_val = optimum_ackley_spd(spd_manifold)
    true_min_vec = symmetric_matrix_to_vector_mandel(true_min)[None]

    # Plot test function with inputs on the sphere
    # 3D figure
    r_cone = 5.
    if disp_fig:
        fig = plt.figure(figsize=(5, 5))
        ax = Axes3D(fig)

        max_colors = bo_plot_function_spd(ax, test_function, r_cone=r_cone, true_opt_x=true_min,
                                          true_opt_y=true_opt_val, alpha=0.3, n_elems=100, n_elems_h=10)
        ax.set_title('True function', fontsize=20)
        plt.show()
    else:
        max_colors = None
Example 13
    else:
        disp_fig = False

    # Instantiate the manifold
    spd_manifold = pyman_man.PositiveDefinite(dim)

    # Update the random function of the manifold (the original one samples only eigenvalues between 1 and 2).
    # We need to specify the minimum and maximum eigenvalues of the random matrices. This is done when defining bounds.
    spd_manifold.rand = types.MethodType(spd_sample, spd_manifold)

    # Function to optimize
    test_function = functools.partial(ackley_function_spd,
                                      spd_manifold=spd_manifold)
    # Optimum
    true_min, true_opt_val = optimum_ackley_spd(spd_manifold)
    true_min_vec = symmetric_matrix_to_vector_mandel(true_min)[None]

    # Plot test function with inputs on the sphere
    # 3D figure
    r_cone = 5.
    if disp_fig:
        fig = plt.figure(figsize=(5, 5))
        ax = Axes3D(fig)

        max_colors = bo_plot_function_spd(ax,
                                          test_function,
                                          r_cone=r_cone,
                                          true_opt_x=true_min,
                                          true_opt_y=true_opt_val,
                                          alpha=0.3,
                                          n_elems=100,
Example 14
    demos = [data_demos[i]['pos'][0][0] for i in range(data_demos.shape[0])]

    # Number of samples, time sampling
    nb_data_init = demos[0].shape[1]
    dt = 1.

    time = np.hstack([np.arange(0, nb_data_init) * dt] * data_demos.shape[0])
    demos_np = np.hstack(demos)

    # Euclidean vector data
    data_eucl = np.vstack((time, demos_np))
    data_eucl = data_eucl[:, :nb_data_init * nb_samples]

    # Create artificial SPD matrices from demonstrations and store them in Mandel notation (along with time)
    data_spd_mandel = [symmetric_matrix_to_vector_mandel(expmap(0.01 * np.dot(data_eucl[1:, n][:, None],
                                                                              data_eucl[1:, n][None]),
                                                                np.eye(2)))[:, None] for n in range(data_eucl.shape[1])]
    data_spd_mandel = np.vstack((data_eucl[0], np.concatenate(data_spd_mandel, axis=1)))

    # Training data
    data = data_spd_mandel[:, ::2]
    # Removing data to show GP uncertainty
    # id_to_remove = np.hstack((np.arange(12, 27), np.arange(34, 38)))
    # id_to_remove = np.hstack((np.arange(24, 54), np.arange(68, 76)))
    id_to_remove = np.hstack((np.arange(24, 37), np.arange(68, 76)))
    # id_to_remove = np.hstack((np.arange(12, 24), np.arange(76, 84)))
    data = np.delete(data, id_to_remove, axis=1)
    nb_data = data.shape[1]
    dim = 2
    dim_vec = 3
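expmap(u, base) above maps the symmetric matrix u from the tangent space at base to the SPD manifold; with base = np.eye(2) it reduces to the matrix exponential. A minimal sketch assuming the standard affine-invariant exponential map (the repo's expmap may differ in conventions):

import numpy as np
from scipy.linalg import expm

def expmap_sketch(u, base):
    # Assumed affine-invariant SPD exponential map:
    # exp_base(u) = base^(1/2) @ expm(base^(-1/2) @ u @ base^(-1/2)) @ base^(1/2)
    eigval, eigvec = np.linalg.eigh(base)
    base_sqrt = eigvec @ np.diag(np.sqrt(eigval)) @ eigvec.T
    base_sqrt_inv = eigvec @ np.diag(1. / np.sqrt(eigval)) @ eigvec.T
    return base_sqrt @ expm(base_sqrt_inv @ u @ base_sqrt_inv) @ base_sqrt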