Example #1
def mnist8m_experiment(method,
                       components,
                       sparsity_factor,
                       run_id,
                       image=None,
                       n_threads=1,
                       partition_size=3000,
                       optimize_stochastic=True,
                       num_samples=1000,
                       max_iter=8000):
    """
    Run the mnist8m experiment.

    Parameters
    ----------
    method : str
        The method under which to run the experiment (mix1, mix2, or full).
    components : int
        The number of mixture components in the variational posterior.
    sparsity_factor : float
        The sparsity of inducing points.
    run_id : int
        The id of the configuration.
    """
    name = 'mnist8m'
    data = data_source.mnist8m_data()[run_id - 1]
    # One RBF kernel per latent process (10 classes, softmax likelihood).
    kernel = [
        ExtRBF(data['train_inputs'].shape[1],
               variance=11,
               lengthscale=np.array((9., )),
               ARD=False) for _ in range(10)
    ]
    cond_ll = likelihood.SoftmaxLL(10)
    transform = data_transformation.IdentityTransformation(
        data['train_inputs'], data['train_outputs'])

    return run_model.run_model(data['train_inputs'],
                               data['train_outputs'],
                               data['test_inputs'],
                               data['test_outputs'],
                               cond_ll,
                               kernel,
                               method,
                               components,
                               name,
                               data['id'],
                               sparsity_factor,
                               transform,
                               False,
                               False,
                               optimization_config={
                                   'mog': 60,
                                   'hyp': 15
                               },
                               num_samples=num_samples,
                               max_iter=max_iter,
                               n_threads=n_threads,
                               ftol=10,
                               model_image_dir=image,
                               partition_size=partition_size,
                               optimize_stochastic=optimize_stochastic)
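For reference, a direct invocation might look like this (argument values are hypothetical; in the repository these wrappers are normally driven from an experiment runner):

# Sketch: mixture-of-Gaussians posterior with 2 components,
# 10% of the training points used as inducing points.
mnist8m_experiment(method='mix2', components=2,
                   sparsity_factor=0.1, run_id=1, n_threads=4)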
Example #2
def mnist_binary_inducing_experiment(method,
                                     sparsity_factor,
                                     run_id,
                                     image=None,
                                     n_threads=1,
                                     partition_size=3000,
                                     optimize_stochastic=False):
    """
    Run the binary mnist experiment with inducing point learning.

    Parameters
    ----------
    method : str
        The method under which to run the experiment (mix1, mix2, or full).
    sparsity_factor : float
        The sparsity of inducing points.
    run_id : int
        The id of the configuration.
    """
    name = 'mnist_binary'
    data = data_source.mnist_binary_data()[run_id - 1]
    kernel = [
        ExtRBF(data['train_inputs'].shape[1],
               variance=11,
               lengthscale=np.array((9., )),
               ARD=False)
    ]
    cond_ll = likelihood.LogisticLL()
    transform = data_transformation.IdentityTransformation(
        data['train_inputs'], data['train_outputs'])

    return run_model.run_model(data['train_inputs'],
                               data['train_outputs'],
                               data['test_inputs'],
                               data['test_outputs'],
                               cond_ll,
                               kernel,
                               method,
                               name,
                               data['id'],
                               sparsity_factor,
                               transform,
                               False,
                               False,
                               optimization_config={
                                   'mog': 60,
                                   'hyp': 15,
                                   'inducing': 6
                               },
                               max_iter=9,
                               n_threads=n_threads,
                               ftol=10,
                               model_image_dir=image,
                               partition_size=partition_size,
                               optimize_stochastic=optimize_stochastic)
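Unlike the fixed-inducing wrappers, this one also optimizes the inducing-point locations (the 'inducing' entry in optimization_config). A hypothetical call:

# Sketch: single-component posterior; inducing locations are learned.
mnist_binary_inducing_experiment(method='mix1', sparsity_factor=0.1, run_id=1)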
Example #3
def normal_generate_samples(n_samples, var, input_dim=3):
    """Draw correlated outputs from a GP prior with an RBF + white-noise kernel.

    Returns the inputs X, one sampled output column per row of `var`,
    and the RBF kernel used to generate the data.
    """
    X = np.random.uniform(low=-1.0, high=1.0, size=(n_samples, input_dim))
    X.sort(axis=0)
    rbf = ExtRBF(input_dim, variance=0.5,
                 lengthscale=np.random.uniform(low=0.1, high=3.0, size=input_dim),
                 ARD=True)
    white = GPy.kern.White(input_dim, variance=var[0, 0])
    kernel = rbf + white
    K = kernel.K(X)
    y = np.empty((n_samples, var.shape[0]))
    for j in range(var.shape[0]):
        # Each output column is an independent draw from N(0, K).
        y[:, j] = np.random.multivariate_normal(np.zeros(n_samples), K)
    return X, y, rbf
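A quick shape check for the sampler above (values hypothetical; numpy, GPy, and ExtRBF must be importable):

# var is (num_outputs, num_outputs); only var[0, 0] and var.shape[0] are used.
X, y, rbf = normal_generate_samples(n_samples=100, var=np.eye(2) * 0.01, input_dim=3)
print(X.shape, y.shape)  # (100, 3) (100, 2)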
Example #4
def usps_experiment(method,
                    components,
                    sparsity_factor,
                    run_id,
                    optimize_stochastic=False):
    """
    Run the usps experiment.

    Parameters
    ----------
    method : str
        The method under which to run the experiment (mix1, mix2, or full).
    components : int
        The number of mixture components in the variational posterior.
    sparsity_factor : float
        The sparsity of inducing points.
    run_id : int
        The id of the configuration.
    """
    name = 'usps'
    data = data_source.usps_data()[run_id - 1]
    # One RBF kernel per latent process (3 classes, softmax likelihood).
    kernel = [
        ExtRBF(data['train_inputs'].shape[1],
               variance=2,
               lengthscale=np.array((4., )),
               ARD=False) for _ in range(3)
    ]
    cond_ll = likelihood.SoftmaxLL(3)
    transform = data_transformation.IdentityTransformation(
        data['train_inputs'], data['train_outputs'])

    return run_model.run_model(data['train_inputs'],
                               data['train_outputs'],
                               data['test_inputs'],
                               data['test_outputs'],
                               cond_ll,
                               kernel,
                               method,
                               components,
                               name,
                               data['id'],
                               sparsity_factor,
                               transform,
                               True,
                               False,
                               optimization_config={
                                   'mog': 25,
                                   'hyp': 25
                               },
                               max_iter=300)
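Because the wrappers share a signature, sweeping the sparsity of inducing points is a short loop (values hypothetical):

# Sketch: compare inducing-point sparsity levels on the same configuration.
for sf in (0.05, 0.1, 0.2):
    usps_experiment(method='full', components=1, sparsity_factor=sf, run_id=1)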
Example #5
# Imports (not shown in the original snippet); the model-class paths are
# assumed to follow the savigp package layout.
import numpy as np
from savigp.kernel import ExtRBF
from savigp.likelihood import UnivariateGaussian
from savigp.full_gaussian_process import FullGaussianProcess
from savigp.diagonal_gaussian_process import DiagonalGaussianProcess

# Configuration (undefined in the original snippet; assumed example values).
posterior = "full"    # posterior family: "full" or "diag"
num_components = 1    # mixture components for the diagonal posterior
num_samples = 2000    # Monte Carlo samples for the expected log-likelihood
latent_noise = 0.001  # jitter added to the kernel for numerical stability

# Load data files generated in the deep GP project.
X = np.loadtxt("experiments/data/multimodal/X.txt")
Y = np.loadtxt("experiments/data/multimodal/Y.txt")
Xtest = np.loadtxt("experiments/data/multimodal/Xtest.txt")

# Ensure all arrays are 2-D column vectors, as the model expects.
X = np.expand_dims(X, axis=1)
Xtest = np.expand_dims(Xtest, axis=1)
Y = np.expand_dims(Y, axis=1)

# Define a univariate Gaussian likelihood function with a variance of 1.
likelihood = UnivariateGaussian(np.array([1.0]))

# Define a radial basis kernel with a variance of 1, lengthscale of 1 and ARD disabled.
kernel = [ExtRBF(1, variance=1.0, lengthscale=np.array([1.0]), ARD=False)]

# Set the number of inducing points to be the whole training set.
num_inducing = int(X.shape[0])


# Initialize the model with the chosen posterior family.
if posterior == "full":
    gp = FullGaussianProcess(X, Y, num_inducing, num_samples, kernel, likelihood,
                             latent_noise=latent_noise, exact_ell=False, partition_size=X.shape[0])
elif posterior == "diag":
    gp = DiagonalGaussianProcess(X, Y, num_inducing, num_components, num_samples,
                                 kernel, likelihood, latent_noise=latent_noise, exact_ell=False,
                                 partition_size=X.shape[0])
else:
    raise ValueError("unknown posterior: %s" % posterior)
Example #6
import numpy as np

from experiments import data_source
from experiments import data_transformation
from savigp.kernel import ExtRBF
from savigp.likelihood import UnivariateGaussian
from savigp import Savigp

# Load the boston dataset.
data = data_source.boston_data()[0]

# Define a univariate Gaussian likelihood function with a variance of 1.
likelihood = UnivariateGaussian(np.array([1.0]))

# Define a radial basis kernel with a variance of 1, lengthscale of 1 and ARD disabled.
kernel = [
    ExtRBF(data['train_inputs'].shape[1],
           variance=1.0,
           lengthscale=np.array([1.0]),
           ARD=False)
]

# Set the number of inducing points to be half of the training data.
num_inducing = int(0.5 * data['train_inputs'].shape[0])

# Transform the data before training.
transform = data_transformation.MeanTransformation(data['train_inputs'],
                                                   data['train_outputs'])
train_inputs = transform.transform_X(data['train_inputs'])
train_outputs = transform.transform_Y(data['train_outputs'])
test_inputs = transform.transform_X(data['test_inputs'])

# Initialize the model.
# The original snippet is truncated here; the remaining keyword arguments
# ('kernels', 'num_inducing') are assumed from the project README.
gp = Savigp(likelihood=likelihood,
            kernels=kernel,
            num_inducing=num_inducing)
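Training and prediction would plausibly continue as in the project README; the fit/predict/untransform_Y calls below are assumed from there rather than shown in this snippet:

# Train on the transformed data, predict on the transformed test inputs,
# then map the predictions back to the original output scale.
gp.fit(train_inputs, train_outputs)
predicted_mean, predicted_var, _ = gp.predict(test_inputs)
predicted_mean = transform.untransform_Y(predicted_mean)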
Example #7
def get_kernels(input_dim, num_latent_proc, ARD):
    return [ExtRBF(input_dim, variance=1, lengthscale=np.array((1.,)), ARD=ARD)
            for _ in range(num_latent_proc)]
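For instance, the multi-class experiments above could build their kernel lists through this helper (values hypothetical):

# One latent process per class, all sharing the same initial RBF settings.
kernels = get_kernels(input_dim=784, num_latent_proc=10, ARD=False)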
Example #8
def seismic_experiment(method,
                       components,
                       sparsity_factor,
                       run_id,
                       image=None,
                       n_threads=1,
                       partition_size=3000,
                       optimize_stochastic=False):
    """
    Run the seismic experiment. Parameters mirror the other experiment
    wrappers (see `mnist8m_experiment`).
    """
    name = 'seismic'
    data = data_source.seismic_data()[0]

    # prior_var = np.array([900, 5625, 57600, 108900, 38025, 52900, 75625, 133225])
    # prior_mu = [200, 500, 1600, 2200, 1950, 2300, 2750, 3650]
    # sigma2y = [0.0006, 0.0025, 0.0056, 0.0100]

    scale_factor = 10.  # for numerical reasons (final predictions are post-processed)
    mean_depth = np.array([200.0, 500.0, 1600.0, 2200.0], dtype=np.double)
    mean_vel = np.array([1950.0, 2300.0, 2750.0, 3650.0], dtype=np.double)
    std_depth = mean_depth * 0.15
    std_vel = mean_vel * 0.10
    prior_mu = np.hstack((mean_depth, mean_vel)) / scale_factor
    prior_var = np.square(np.hstack(
        (std_depth, std_vel))) / (scale_factor * scale_factor)
    sigma2y = np.square([0.025, 0.05, 0.075, 0.1])

    input_dim = data['train_inputs'].shape[1]

    # One kernel per latent process (4 depths + 4 velocities), with the
    # prior variance used as the initial kernel variance.
    kernel = [
        ExtRBF(input_dim,
               variance=prior_var[i],
               lengthscale=np.array((1, )),
               ARD=True) for i in range(len(prior_var))
    ]

    cond_ll = likelihood.SeismicLL(4, sigma2y)

    transform = data_transformation.IdentityTransformation(
        data['train_inputs'], data['train_outputs'])
    #transform = data_transformation.MeanStdXTransformation(data['train_inputs'], data['train_outputs'])

    return run_model.run_model(
        data['train_inputs'],
        data['train_outputs'],
        data['test_inputs'],
        data['test_outputs'],
        cond_ll,
        kernel,
        method,
        components,
        name,
        data['id'],
        sparsity_factor,
        transform,
        False,
        True,
        optimization_config={'mog': 100},
        # max_iter=10,
        partition_size=partition_size,
        # ftol=1,
        n_threads=n_threads,
        model_image_dir=image,
        GP_mean=prior_mu,
        init_var=0.001 * prior_var,
        num_samples=100000,
    )
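A hypothetical invocation; note that, per the scale_factor comment above, predictions come back in scaled units and must be post-processed back to physical depths and velocities:

# Sketch: full posterior, 5% inducing-point sparsity, first configuration.
seismic_experiment(method='full', components=1,
                   sparsity_factor=0.05, run_id=1, n_threads=4)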