Example #1
def bgplvm_simulation(
    optimize=True,
    verbose=1,
    plot=True,
    plot_sim=False,
    max_iters=2e4,
):
    from GPy import kern
    from GPy.models import BayesianGPLVM

    D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 45, 3, 9
    _, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
    Y = Ylist[0]
    k = kern.Linear(Q, ARD=True)  # + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
    # k = kern.RBF(Q, ARD=True, lengthscale=10.)
    m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k)
    m.X.variance[:] = _np.random.uniform(0, .01, m.X.shape)
    m.likelihood.variance = .1

    if optimize:
        print("Optimizing model:")
        m.optimize('bfgs', messages=verbose, max_iters=max_iters, gtol=.05)
    if plot:
        m.X.plot("BGPLVM Latent Space 1D")
        m.kern.plot_ARD('BGPLVM Simulation ARD Parameters')
    return m
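
A minimal sketch of calling this example, assuming the module-level helpers _simulate_matern and _np used above are importable:

m = bgplvm_simulation(optimize=True, verbose=0, plot=False)
# input_sensitivity() exposes the ARD weights, i.e. which of the Q
# latent dimensions the linear kernel actually uses.
print(m.input_sensitivity())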
Example #2
    def _get_features(
        self,
        X: np.ndarray,
        Y: np.ndarray,
        C: np.ndarray,
        display_messages: bool = True,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Generate features for the given task.

        Parameters
        ----------
        X : np.ndarray
            Training examples
        Y : np.ndarray
            Training labels
        C : np.ndarray
            Training costs
        display_messages : bool, optional
            Whether to log messages to the console or not, by default True.

        Returns
        -------
        Tuple[np.ndarray, np.ndarray]
            Arrays of per-task feature means and standard deviations.
        """
        n_tasks = Y.shape[0]
        n_configs = X.shape[0]
        index_task = np.repeat(np.arange(n_tasks), n_configs)
        Y_norm, _, _ = self.normalize_Y(deepcopy(Y.flatten()), index_task)

        # train the probabilistic encoder
        kern = GPy.kern.Matern52(input_dim=self.hidden_space, ARD=True)

        m_lvm = BayesianGPLVM(
            Y_norm.reshape(n_tasks, n_configs),
            input_dim=self.hidden_space,
            kernel=kern,
            num_inducing=self.n_inducing_lvm,
        )
        m_lvm.optimize(max_iters=self.max_iters, messages=display_messages)

        ls = np.array([
            m_lvm.kern.lengthscale[i]
            for i in range(m_lvm.kern.lengthscale.shape[0])
        ])

        # generate data to train the multi-task model
        task_features_mean = np.array(m_lvm.X.mean / ls)
        task_features_std = np.array(np.sqrt(m_lvm.X.variance) / ls)

        return task_features_mean, task_features_std
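
A hypothetical call site for the method above; the encoder object and its hidden_space, n_inducing_lvm and max_iters attributes, as well as normalize_Y, are assumptions read off the signature and body:

# X: (n_configs, d) configurations; Y, C: (n_tasks, n_configs).
mean_feats, std_feats = encoder._get_features(X, Y, C, display_messages=False)
# One latent feature vector per task, scaled by the learned ARD lengthscales.
assert mean_feats.shape == (Y.shape[0], encoder.hidden_space)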
def bgplvm_simulation(
    optimize=True,
    verbose=1,
    plot=True,
    plot_sim=False,
    max_iters=2e4,
):
    from GPy import kern
    from GPy.models import BayesianGPLVM

    D1, D2, D3, N, num_inducing, Q = 49, 30, 10, 12, 3, 10
    _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
    Y = Ylist[0]
    k = kern.linear(Q, ARD=True)  # lowercase constructors are the legacy (pre-0.8) GPy kernel API
    m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k)
    m.X_variance = m.X_variance * .7
    m['noise'] = Y.var() / 100.

    if optimize:
        print "Optimizing model:"
        m.optimize('scg', messages=verbose, max_iters=max_iters, gtol=.05)
    if plot:
        m.plot_X_1d("BGPLVM Latent Space 1D")
        m.kern.plot_ARD('BGPLVM Simulation ARD Parameters')
    return m
def stick_bgplvm(model=None, optimize=True, verbose=True, plot=True):
    """Interactive visualisation of the Stick Man data from Ohio State University with the Bayesian GPLVM."""
    from GPy.models import BayesianGPLVM
    from matplotlib import pyplot as plt
    import numpy as np
    import GPy
    import pods

    data = pods.datasets.osu_run1()
    Q = 6
    kernel = GPy.kern.RBF(Q, lengthscale=np.repeat(.5, Q), ARD=True)
    m = BayesianGPLVM(data['Y'], Q, init="PCA", num_inducing=20, kernel=kernel)

    m.data = data
    m.likelihood.variance = 0.001

    # optimize
    try:
        if optimize:
            m.optimize('bfgs', messages=verbose, max_iters=5e3, bfgs_factor=10)
    except KeyboardInterrupt:
        print("Keyboard interrupt, continuing to plot and return")

    if plot:
        fig, (latent_axes, sense_axes) = plt.subplots(1, 2)
        plt.sca(latent_axes)
        m.plot_latent(ax=latent_axes)
        y = m.Y[:1, :].copy()
        data_show = GPy.plotting.matplot_dep.visualize.stick_show(y, connect=data['connect'])
        dim_select = GPy.plotting.matplot_dep.visualize.lvm_dimselect(m.X.mean[:1, :].copy(), m, data_show, latent_axes=latent_axes, sense_axes=sense_axes)
        fig.canvas.draw()
        # Canvas.show doesn't work on OSX.
        #fig.canvas.show()
        input('Press enter to finish')

    return m
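
To fit the stick-man model without the interactive viewer, a minimal sketch (assumes pods can fetch the OSU run dataset):

m = stick_bgplvm(optimize=True, verbose=False, plot=False)
print(m.log_likelihood())  # quick sanity check on the fitted model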
def bgplvm_simulation(optimize=True, verbose=1,
                      plot=True, plot_sim=False,
                      max_iters=2e4,
                      ):
    from GPy import kern
    from GPy.models import BayesianGPLVM

    D1, D2, D3, N, num_inducing, Q = 15, 5, 8, 30, 3, 10
    _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
    Y = Ylist[0]
    k = kern.linear(Q, ARD=True) + kern.bias(Q, _np.exp(-2)) + kern.white(Q, _np.exp(-2))  # legacy GPy kernel API
    m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k)
    m['noise'] = Y.var() / 100.

    if optimize:
        print "Optimizing model:"
        m.optimize('scg', messages=verbose, max_iters=max_iters,
                   gtol=.05)
    if plot:
        m.plot_X_1d("BGPLVM Latent Space 1D")
        m.kern.plot_ARD('BGPLVM Simulation ARD Parameters')
    return m
def stick_bgplvm(model=None, optimize=True, verbose=True, plot=True):
    from GPy.models import BayesianGPLVM
    from matplotlib import pyplot as plt
    import GPy

    data = GPy.util.datasets.osu_run1()
    Q = 6
    kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, _np.exp(-2)) + GPy.kern.white(Q, _np.exp(-2))
    m = BayesianGPLVM(data['Y'], Q, init="PCA", num_inducing=20, kernel=kernel)
    # optimize
    m.ensure_default_constraints()
    if optimize:
        m.optimize('scg', messages=verbose, max_iters=200, xtol=1e-300, ftol=1e-300)
    m._set_params(m._get_params())
    if plot:
        fig, (latent_axes, sense_axes) = plt.subplots(1, 2)
        plt.sca(latent_axes)
        m.plot_latent()
        y = m.likelihood.Y[0, :].copy()
        data_show = GPy.util.visualize.stick_show(y[None, :], connect=data['connect'])
        GPy.util.visualize.lvm_dimselect(m.X[0, :].copy(), m, data_show, latent_axes=latent_axes, sense_axes=sense_axes)
        input('Press enter to finish')

    return m
def swiss_roll(optimize=True, verbose=1, plot=True, N=1000, num_inducing=15, Q=4, sigma=.2):
    import GPy
    from GPy.util.datasets import swiss_roll_generated
    from GPy.models import BayesianGPLVM

    data = swiss_roll_generated(num_samples=N, sigma=sigma)
    Y = data['Y']
    Y -= Y.mean()
    Y /= Y.std()

    t = data['t']
    c = data['colors']

    try:
        from sklearn.manifold import Isomap
        iso = Isomap().fit(Y)
        X = iso.embedding_
        if Q > 2:
            X = _np.hstack((X, _np.random.randn(N, Q - 2)))
    except ImportError:
        X = _np.random.randn(N, Q)

    if plot:
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D  # @UnusedImport
        fig = plt.figure("Swiss Roll Data")
        ax = fig.add_subplot(121, projection='3d')
        ax.scatter(*Y.T, c=c)
        ax.set_title("Swiss Roll")

        ax = fig.add_subplot(122)
        ax.scatter(*X.T[:2], c=c)
        ax.set_title("BGPLVM init")

    var = .5
    S = (var * _np.ones_like(X) + _np.clip(_np.random.randn(N, Q) * var ** 2,
                                         - (1 - var),
                                         (1 - var))) + .001
    Z = _np.random.permutation(X)[:num_inducing]

    kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, _np.exp(-2)) + GPy.kern.white(Q, _np.exp(-2))

    m = BayesianGPLVM(Y, Q, X=X, X_variance=S, num_inducing=num_inducing, Z=Z, kernel=kernel)
    m.data_colors = c
    m.data_t = t
    m['noise_variance'] = Y.var() / 100.

    if optimize:
        m.optimize('scg', messages=verbose, max_iters=2e3)

    if plot:
        fig = plt.figure('fitted')
        ax = fig.add_subplot(111)
        s = m.input_sensitivity().argsort()[::-1][:2]
        ax.scatter(*m.X.T[s], c=c)

    return m
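
A smaller smoke-test run of the swiss-roll example, as a sketch (the legacy lowercase GPy kernel API used above must be available):

m = swiss_roll(optimize=False, plot=False, N=200, num_inducing=10, Q=3)
print(m.input_sensitivity())  # ranks the latent dimensions, as in the plot above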
Example #13
        download_data(args.input_path)

    X, Y, C = load_data(args.input_path, fname)
    if len(X.shape) == 1:
        X = X[:, None]

    n_tasks = Y.shape[0]
    n_configs = X.shape[0]
    index_task = np.repeat(np.arange(n_tasks), n_configs)
    Y_norm, _, _ = normalize_Y(deepcopy(Y.flatten()), index_task)

    # train the probabilistic encoder
    kern = GPy.kern.Matern52(Q_h, ARD=True)

    m_lvm = BayesianGPLVM(Y_norm.reshape(n_tasks, n_configs),
                          Q_h,
                          kernel=kern,
                          num_inducing=n_inducing_lvm)
    m_lvm.optimize(max_iters=10000, messages=1)

    ls = np.array([
        m_lvm.kern.lengthscale[i]
        for i in range(m_lvm.kern.lengthscale.shape[0])
    ])

    # generate data to train the multi-task model
    task_features_mean = np.array(m_lvm.X.mean / ls)
    task_features_std = np.array(np.sqrt(m_lvm.X.variance) / ls)
    X_train = []
    Y_train = []
    C_train = []