import os

import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils.extmath import cartesian
from GPy.models import GPRegression

# NOTE (added): the project-local imports below are assumed module paths;
# adjust them to wherever TripathyMaternKernel, t_WOptimizer, TripathyOptimizer,
# loss, Metrics, Parabola and Camelback live in your repository.
# from t_kernel import TripathyMaternKernel
# from t_optimizer import t_WOptimizer, TripathyOptimizer
# from t_loss import loss
# from t_metrics import Metrics
# from t_functions import Parabola, Camelback


class TestFunctions(object):
    def init(self):
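        # Called explicitly from each test (instead of __init__), so every test
        # starts from freshly sampled data.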
        self.no_samples = 20
        self.function = Parabola()
        self.real_dim = 2
        self.active_dim = 1
        self.function._set_dimension(self.active_dim)
        self.kernel = TripathyMaternKernel(self.real_dim, self.active_dim)

        # Hide the matrix over here!
        # self.real_W = np.asarray([
        #     [0, 0],
        #     [0, 1],
        #     [1, 0]
        # ])

        self.real_W = np.asarray([[1], [1]])
        self.real_W = self.real_W / np.linalg.norm(self.real_W)
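        # real_W now has unit norm, i.e. orthonormal columns (a point on the
        # Stiefel manifold), matching what the Stiefel-manifold optimizer expects.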

        self.X = np.random.rand(self.no_samples, self.real_dim)
        Z = np.dot(self.X, self.real_W)
        assert Z.shape == (self.no_samples, self.active_dim)
        self.Y = self.function.f(Z.T).reshape(self.no_samples, 1)
        assert self.Y.shape == (self.no_samples, 1)

        self.sn = 0.8

    def test_gp_regression(self):
        """
            The prediction of GPRegression sohuld be 1D!
        :return:
        """
        self.init()

        test_samples = 10

        Xrand = np.random.rand(test_samples, self.real_dim)

        # Check shape of GP
        gp_reg = GPRegression(self.X,
                              self.Y,
                              kernel=self.kernel,
                              noise_var=self.sn)

        # predict() returns a (mean, variance) tuple; take the mean only.
        y_hat = gp_reg.predict(Xrand)[0]

        assert y_hat.shape == (test_samples, 1), y_hat.shape

    def test_function_returns_non_zeros(self):
        self.init()
        # The test name documents the intent: the sampled targets should not
        # all be zero for a random input set.
        assert np.any(self.Y != 0)


class TestMatrixRecovery(object):
    """
        We hide a function that depends on a matrix A inside a higher-dimensional
        space, then test whether our algorithm can recover this matrix (A_hat
        denotes the approximation).

        More specifically, we check whether:

        f(A x) = f(A_hat x)
    """

    def init(self):

        self.real_dim = 2
        self.active_dim = 1
        self.no_samples = 75
        self.kernel = TripathyMaternKernel(self.real_dim, self.active_dim)

        # Hide the matrix over here!
        if self.real_dim == 3 and self.active_dim == 2:
            self.function = Camelback()
            self.real_W = np.asarray([
                [0, 1],
                [1, 0],
                [0, 0]
            ])
        elif self.real_dim == 2 and self.active_dim == 1:
            self.function = Parabola()
            self.real_W = np.asarray([
                [1],
                [1],
            ])
            self.real_W = self.real_W / np.linalg.norm(self.real_W)
        else:
            assert False, "W was not set!"

        self.sn = 0.1

        self.X = np.random.rand(self.no_samples, self.real_dim)
        Z = np.dot(self.X, self.real_W)
        self.Y = self.function.f(Z.T).reshape(-1, 1)

        self.w_optimizer = t_WOptimizer(
            self.kernel,
            self.sn,
            float(self.kernel.inner_kernel.variance),  # np.asscalar was removed in NumPy 1.23+
            self.kernel.inner_kernel.lengthscale,
            self.X, self.Y
        )
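        # t_WOptimizer optimizes W over the Stiefel manifold; the noise sn and
        # the kernel hyperparameters (variance, lengthscale) passed here stay
        # fixed during that search.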

        # self.kernel already exposes sample_W(), so no separate kernel is
        # needed here just to access that function.

        self.tries = 10
        self.max_iter = 1  # use 150 for a full run

        self.metrics = Metrics(self.no_samples)

    def test_if_function_is_found(self):
        """
            Replace these tests by the actual optimizer function!
        :return:
        """
        self.init()

        print("Real matrix is: ", self.real_W)

        all_tries = []
        for i in range(self.tries):
            # Initialize random guess
            W_hat = self.kernel.sample_W()

            # Find a good W!
            for _ in range(self.max_iter):
                W_hat = self.w_optimizer.optimize_stiefel_manifold(W_hat)

            print("Difference to real W is: ", (W_hat - self.real_W))

            assert W_hat.shape == self.real_W.shape
            self.kernel.update_params(
                W=W_hat,
                l=self.kernel.inner_kernel.lengthscale,
                s=self.kernel.inner_kernel.variance
            )

            # TODO: update the gaussian process with the new kernels parameters! (i.e. W_hat)

            # Create the gp_regression function and pass in the predictor function as f_hat
            gp_reg = GPRegression(self.X, self.Y, self.kernel, noise_var=self.sn)
            res = self.metrics.mean_difference_points(
                fnc=self.function._f,
                fnc_hat=gp_reg.predict,
                A=self.real_W,
                A_hat=W_hat,
                X=self.X
            )

            all_tries.append(res)

        print(all_tries)

        assert np.asarray(all_tries).any()

    def test_if_hidden_matrix_is_found_multiple_initializations(self):
        self.init()

        print("Real matrix is: ", self.real_W)

        all_tries = []

        for i in range(self.tries):
            # Initialize random guess
            W_hat = self.kernel.sample_W()

            # Find a good W!
            for _ in range(self.max_iter):
                W_hat = self.w_optimizer.optimize_stiefel_manifold(W_hat)

            print("Difference to real (AA.T) W is: ", (W_hat - self.real_W))

            assert W_hat.shape == self.real_W.shape
            assert not (W_hat == self.real_W).all()
            res = self.metrics.projects_into_same_original_point(self.real_W, W_hat)
            all_tries.append(res)

        assert any(all_tries)
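
# A minimal sketch (added) of the subspace comparison used by the test above:
# W and W_hat span the same active subspace exactly when their projection
# matrices coincide. The helper name is hypothetical and is not part of the
# Metrics class.
def projections_match(A, A_hat, atol=1e-2):
    # W W^T is invariant to right-multiplication by an orthogonal matrix,
    # so this comparison ignores the sign/rotation ambiguity in W_hat.
    return np.allclose(np.dot(A, A.T), np.dot(A_hat, A_hat.T), atol=atol)
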
class VisualizedTestingTau:

    def __init__(self):
        self.real_dim = 2
        self.active_dim = 1

        self.no_samples = 5
        self.kernel = TripathyMaternKernel(self.real_dim, self.active_dim)

        # Parameters
        self.sn = 2.
        self.W = self.kernel.sample_W()

        self.function = Parabola()
        self.real_W = np.asarray([
            [1],
            [1]
        ])
        self.real_W = self.real_W / np.linalg.norm(self.real_W)

        self.X = np.random.rand(self.no_samples, self.real_dim)
        Z = np.dot(self.X, self.real_W).reshape((-1, 1))
        self.Y = self.function.f(Z.T).squeeze()

        self.w_optimizer = t_WOptimizer(
            self.kernel, # TODO: does the kernel take over the W?
            self.sn,
            float(self.kernel.inner_kernel.variance),  # np.asscalar was removed in NumPy 1.23+
            self.kernel.inner_kernel.lengthscale,
            self.X, self.Y
        )

        # Define the plotting variables
        self.tau_arr = np.linspace(0., self.w_optimizer.tau_max, 100)
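        # tau parametrizes the curve gamma(tau, W) followed during the line
        # search; tau_max is assumed to be exposed by t_WOptimizer.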

    def visualize_tau_trajectory_for_random_W(self):
        """
            Visualize the trajectory of the gamma
            function against the loss function
            we have f(tau) = F( gamma(tau, W) )
        :return:
        """
        loss_arr = []

        # Sample a random W
        W_init = self.kernel.sample_W()
        for tau in self.tau_arr:
            print("New tau is: ", tau)
            W = self.w_optimizer._gamma(tau, W_init)
            loss_val = loss(
                self.kernel,
                W,
                self.sn,
                self.kernel.inner_kernel.variance,
                self.kernel.inner_kernel.lengthscale,
                self.X,
                self.Y.squeeze()
            )
            loss_arr.append(loss_val)

        print(loss_arr)

        plt.title("Tau vs Loss - Randomly sampled W")
        plt.scatter(self.tau_arr, loss_arr)
        plt.axis([min(self.tau_arr), max(self.tau_arr), min(loss_arr), max(loss_arr)])
        plt.show()

    def visualize_tau_trajectory_for_identity_W(self):
        """
            Visualize the trajectory of the gamma
            function against the loss function
            we have f(tau) = F( gamma(tau, W) )
        :return:
        """
        loss_arr = []

        # Start from the true W rather than a random sample
        W_init = self.real_W
        for tau in self.tau_arr:
            print("New tau is: ", tau)
            W = self.w_optimizer._gamma(tau, W_init)
            loss_val = loss(
                self.kernel,
                W,
                self.sn,
                self.kernel.inner_kernel.variance,
                self.kernel.inner_kernel.lengthscale,
                self.X,
                self.Y.squeeze()
            )
            loss_arr.append(loss_val)

        print(loss_arr)

        plt.title("Tau vs Loss - Identity-similarsampled W")
        plt.scatter(self.tau_arr, loss_arr)
        plt.axis([min(self.tau_arr), max(self.tau_arr), min(loss_arr), max(loss_arr)])
        plt.show()
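
# Minimal usage sketch (added; hypothetical driver, not part of the original code):
#   vis = VisualizedTestingTau()
#   vis.visualize_tau_trajectory_for_random_W()
#   vis.visualize_tau_trajectory_for_identity_W()
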
class VisualizedTestingWParabola:

    def __init__(self):
        self.real_dim = 2
        self.active_dim = 1

        self.no_samples = 50
        self.kernel = TripathyMaternKernel(self.real_dim, self.active_dim)

        # Parameters
        self.sn = 0.1
        self.W = self.kernel.sample_W()

        self.function = Parabola()
        self.real_W = np.asarray([
            [1],
            [1]
        ])
        self.real_W = self.real_W / np.linalg.norm(self.real_W)

        self.X = np.random.rand(self.no_samples, self.real_dim)
        Z = np.dot(self.X, self.real_W)
        self.Y = self.function.f(Z.T).reshape(-1, 1)

        self.w_optimizer = t_WOptimizer(
            self.kernel, # TODO: does the kernel take over the W?
            self.sn,
            float(self.kernel.inner_kernel.variance),  # np.asscalar was removed in NumPy 1.23+
            self.kernel.inner_kernel.lengthscale,
            self.X, self.Y
        )

        self.no_tries = 1000

    def visualize_quadratic_function(self):
        x_range = np.linspace(0., 1., 80)
        y_range = np.linspace(0., 1., 80)
        X = cartesian([x_range, y_range])
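        # cartesian() takes the cross product of the two ranges, so X is the
        # full 80x80 grid with shape (6400, 2).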

        os.makedirs("./pics/", exist_ok=True)

        #################################
        #     TRAIN THE W_OPTIMIZER     #
        #################################

        Opt = TripathyOptimizer()

        for j in range(self.no_tries):
            print("Try number : ", j)

            W_hat = self.kernel.sample_W()
            self.kernel.update_params(
                W=W_hat,
                s=self.kernel.inner_kernel.variance,
                l=self.kernel.inner_kernel.lengthscale
            )

            W_hat, sn, l, s = Opt.run_two_step_optimization(self.kernel, self.sn, self.X, self.Y)

            # Create the gp_regression function and pass in the predictor function as f_hat
            self.kernel.update_params(W=W_hat, l=l, s=s)
            gp_reg = GPRegression(self.X, self.Y, self.kernel, noise_var=sn)

            y = self.function.f( np.dot(X, self.real_W).T )
            y_hat = gp_reg.predict(self.X)[0].squeeze()

            #################################
            #   END TRAIN THE W_OPTIMIZER   #
            #################################

            fig = plt.figure()
            # Axes3D(fig) no longer attaches axes in recent matplotlib;
            # add_subplot(projection="3d") is the supported way.
            ax = fig.add_subplot(projection="3d")

            # Plot the true function over the grid, then the GP predictions
            # at the training inputs.
            ax.scatter(X[:, 0], X[:, 1], y, s=1)
            ax.scatter(self.X[:, 0], self.X[:, 1], y_hat, cmap=plt.cm.jet)
            fig.savefig('./pics/Iter_' + str(j) + '.png')
            # plt.show()
            plt.close(fig)

            # Save the W (and its loss) in case the run is interrupted.
            # Use a dedicated name so the lengthscale l is not shadowed.
            loss_val = loss(
                self.kernel,
                W_hat,
                sn,
                s,
                l,
                self.X,
                self.Y
            )
            np.savetxt("./pics/Iter_" + str(j) + "__" + "Loss_" + str(loss_val) + ".txt", W_hat)