def compute_expectation_sample(self, parameters_kernel):
        """
        Sample the posterior expectation of the GP at every historical point,
        averaging over the environmental domain self.domain_xe with the
        weights self.weights.

        :param parameters_kernel: np.array(k), kernel parameters
        :return: np.array((n, 1)), one expectation value per historical point
        """
        historical_points = self.gp.data['points']
        var, beta = self.estimate_variance_gp(parameters_kernel)
        y = self.gp.data['evaluations']
        n = len(y)
        z = np.ones(n)

        n_xe = len(self.domain_xe)
        # For each historical point, pair its control part (first x_domain
        # coordinates) with every point of the environmental domain.
        create_vector = np.zeros(
            (historical_points.shape[0] * n_xe, historical_points.shape[1]))

        for i in range(historical_points.shape[0]):
            for j in range(n_xe):
                first_part = historical_points[i][0:self.x_domain]
                point = np.concatenate((first_part, np.array(self.domain_xe[j])))
                # BUG FIX: the original used (i - 1) * n_xe + j, which (through
                # negative indexing at i == 0) rotated every block by one
                # position, misaligning create_vector's blocks with the rows of
                # the cross-covariance computed below.
                create_vector[i * n_xe + j, :] = point

        cov = self.gp.evaluate_cov(historical_points, parameters_kernel)
        chol = cholesky(cov, max_tries=7)

        matrix_cov = self.gp.kernel.evaluate_cross_cov_defined_by_params(
            parameters_kernel, self.gp.data['points'], create_vector, historical_points.shape[1])
        # Collapse the environmental dimension: block-wise weighted average.
        matrix_cov = np.dot(
            matrix_cov, np.kron(np.identity(n), self.weights.reshape((len(self.weights), 1))))
        vect = y - beta * z
        vect = vect.reshape((len(vect), 1))
        solve = np.dot(matrix_cov.transpose(), cho_solve(chol, vect))

        sample = np.ones((n, 1)) * beta + solve
        return sample
    def estimate_variance_gp(self, parameters_kernel, chol=None):
        """
        Correct
        :param parameters_kernel:
        :param chol:
        :return:
        """
        historical_points = self.gp.data['points']
        if chol is None:
            cov = self.gp.evaluate_cov(historical_points, parameters_kernel)
            chol = cholesky(cov, max_tries=7)

        y = self.gp.data['evaluations']
        n = chol.shape[0]
        z = np.ones(n)

        solve = cho_solve(chol, y)
        part_1 = np.dot(y, solve)

        solve_2 = cho_solve(chol, z)
        part_2 = np.dot(z, solve_2)

        beta = np.dot(z, solve) / part_2
        part_2 *= (beta ** 2)

        return (part_1 - part_2) / float(n - 1), beta
# Beispiel #3 (scraped example separator — kept as a comment)
# 0
    def log_likelihood(self,
                       gp_model,
                       parameters_kernel,
                       mean_parameters=None,
                       weights=None):
        """
        GP log likelihood: y(x) ~ f(x) + epsilon, where epsilon(x) are iid N(0,var_noise), and
        f(x) ~ GP(mean, cov)

        :param var_noise: (float) variance of the noise
        :param mean: (float)
        :param parameters_kernel: np.array(k), The order of the parameters is given in the
            definition of the class kernel.
        :return: float

        """

        X_data = gp_model.data['points']
        cov = self.covariance_diff_kernel(gp_model, X_data, parameters_kernel)
        chol = cholesky(cov, max_tries=7)

        if self.parametric_mean:
            mean = self.compute_parametric_mean(gp_model, weights,
                                                mean_parameters)
        else:
            mean = 0
        y_unbiased = gp_model.data['evaluations'] - mean
        solve = cho_solve(chol, y_unbiased)

        return -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(y_unbiased, solve)
# Beispiel #4 (scraped example separator — kept as a comment)
# 0
    def compute_posterior_params(self,
                                 gp_model,
                                 kernel_params,
                                 mean_parameters=None,
                                 weights=None):
        ## Mean is zero. We may need to change when including mean
        ##Compute posterior parameters of f(x*)

        f = self.function_factor_kernel
        g = self.divisor_kernel

        current_point = [gp_model.current_iteration]

        X_data = gp_model.data['points']
        vector_ = self.cov_diff_point(gp_model, kernel_params, current_point,
                                      X_data)

        cov = self.covariance_diff_kernel(gp_model, X_data, kernel_params)
        chol = cholesky(cov, max_tries=7)

        if self.parametric_mean:
            mean = self.compute_parametric_mean(gp_model, weights,
                                                mean_parameters)
            prior_mean = self.parametrics.weighted_combination(
                current_point[0], weights, mean_parameters)
        else:
            mean = 0.
            prior_mean = 0.

        y_unbiased = gp_model.data['evaluations'] - mean
        solve = cho_solve(chol, y_unbiased)

        part_2 = cho_solve(chol, vector_)

        if self.model_gradient == 'real_gradient':
            grad = np.array(gp_model.raw_results['gradients'][-1])
        elif self.model_gradient == 'grad_epoch':
            grad = np.array(
                gp_model.raw_results['gradients'][gp_model.current_iteration -
                                                  1])

        mean = gp_model.raw_results['values'][-1][0] - \
               grad * (np.dot(vector_, solve) + prior_mean) \
               / np.sqrt(gp_model.current_iteration)

        raw_cov = gp_model.kernel.evaluate_cov_defined_by_params(
            kernel_params, np.array([current_point]), 1)

        var = raw_cov - np.dot(vector_, part_2)
        var *= (grad**2) / (float(gp_model.current_iteration))

        return mean[0], var[0, 0]
# Beispiel #5 (scraped example separator — kept as a comment)
# 0
    def log_likelihood(self, gp_model, kernel_parameters):

        X_data = gp_model.data['points']

        cov = gp_model.kernel.evaluate_cov_defined_by_params(kernel_parameters, X_data, 1)
        chol = cholesky(cov, max_tries=7)


        y_unbiased = gp_model.data['evaluations']


        solve = cho_solve(chol, y_unbiased)

        return -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(y_unbiased, solve)
    def test_cholesky(self):
        chol = cholesky(self.cov)
        npt.assert_almost_equal(np.dot(chol, chol.transpose()), self.cov)

        with self.assertRaises(linalg.LinAlgError):
            cholesky(self.cov_2)

        chol_ = cholesky(self.cov_2, max_tries=7)
        npt.assert_almost_equal(self.cov_2, np.dot(chol_, chol_.transpose()), decimal=1)

        with self.assertRaises(linalg.LinAlgError):
            cholesky(np.array([[-1, 5], [3, 7]]))
    def log_posterior_distribution_length_scale(self, parameters_kernel):
        """
        Correct
        :param parameters_kernel:
        :return:
        """
        historical_points = self.gp.data['points']
        cov = self.gp.evaluate_cov(historical_points, parameters_kernel)

        n = cov.shape[0]
        y = np.ones(n)
        chol = cholesky(cov, max_tries=7)
        determinant_cov = np.product(np.diag(chol)) ** 2

        solve = cho_solve(chol, y)
        part_1 = np.dot(y, solve)
        var = self.estimate_variance_gp(parameters_kernel, chol=chol)[0]

        objective = \
            -(n-1) * 0.5 * np.log(var) - 0.5 * np.log(determinant_cov) - 0.5 * np.log(part_1)

        return objective
    def compute_posterior_params(self, gp_model, kernel_params, mean_parameters=None, weights=None):
        ## Mean is zero. We may need to change when including mean
        ##Compute posterior parameters of f(x*)

        f = self.function_factor_kernel
        g = self.divisor_kernel

        current_point = [gp_model.current_iteration]

        X_data = gp_model.data['points']
        vector_ = self.cov_diff_point(gp_model, kernel_params, current_point, X_data)

        cov = self.covariance_diff_kernel(gp_model, X_data, kernel_params)
        chol = cholesky(cov, max_tries=7)

        if self.parametric_mean:
            mean = self.compute_parametric_mean(gp_model, weights, mean_parameters)
            prior_mean = self.parametrics.weighted_combination(
                current_point[0], weights, mean_parameters) / f(current_point[0])
        else:
            mean = 0.
            prior_mean = 0.

        y_unbiased = gp_model.data['evaluations'] - mean
        solve = cho_solve(chol, y_unbiased)

        part_2 = cho_solve(chol, vector_)

        mean = gp_model.raw_results[-1] + np.dot(vector_, solve) + prior_mean

        raw_cov = gp_model.kernel.evaluate_cov_defined_by_params(
            kernel_params, np.array([current_point]), 1) / \
                  g(f(current_point[0]), f(current_point[0]))

        var = raw_cov - np.dot(vector_, part_2)

        return mean, var
def evaluate_squared_error(self, environment, control, parameters_kernel):
        """
        Evaluate the (estimated) squared error criterion at a candidate
        control/environment pair, using the historical data plus the
        candidate point as an augmented design.

        :param control: np.array, control part of the candidate point
        :param environment: np.array, environmental part of the candidate
        :param parameters_kernel: np.array(k), kernel parameters
        :return: np.array((1, 1)) criterion value
        """
        new_point = np.concatenate((control, environment))
        new_point = new_point.reshape((1, len(new_point)))

        historical_points = self.gp.data['points']
        dim_kernel = historical_points.shape[1]
        cov = self.gp.evaluate_cov(historical_points, parameters_kernel)
        chol = cholesky(cov, max_tries=7)

        var, beta = self.estimate_variance_gp(parameters_kernel)

        y = self.gp.data['evaluations']
        n = len(y)

        # Cross-covariance between the historical design and the candidate.
        r = self.gp.kernel.evaluate_cross_cov_defined_by_params(
            parameters_kernel, historical_points, new_point, dim_kernel)
        one = np.ones(n)
        vect = y - beta * one
        # Kriging prediction at the candidate point; M augments the
        # observations with that prediction.
        m1 = beta + np.dot(r.transpose(), cho_solve(chol, vect.reshape((len(vect), 1))))
        M = np.concatenate((y,  m1[0, :]))
        M = M.reshape((len(M), 1))

        # Pair the candidate control with every environmental point.
        candidate_vector = np.zeros((len(self.domain_xe), dim_kernel))
        for j in range(len(self.domain_xe)):
            point = np.concatenate((control, np.array(self.domain_xe[j])))
            candidate_vector[j, :] = point
        R3 = self.gp.kernel.evaluate_cross_cov_defined_by_params(
            parameters_kernel, candidate_vector, candidate_vector, dim_kernel)
        R3SN = self.gp.kernel.evaluate_cross_cov_defined_by_params(
            parameters_kernel, candidate_vector, historical_points, dim_kernel)
        weights_matrix = self.weights.reshape((len(self.weights), 1))
        r3 = self.gp.kernel.evaluate_cross_cov_defined_by_params(
            parameters_kernel, candidate_vector, new_point, dim_kernel)
        # Weighted average of the candidate block's covariance over the
        # environmental domain.
        R = np.dot(weights_matrix.transpose(), np.dot(R3, weights_matrix))

        e12 = np.dot(weights_matrix.transpose(), np.concatenate((R3SN, r3), axis=1))

        # Augmented covariance of (historical design, candidate point);
        # NOTE(review): the candidate's own covariance block is modeled as a
        # matrix of ones — presumably a unit-variance convention; confirm.
        covE = np.concatenate((cov, r), axis=1)
        one_matrix = np.ones((r.shape[1], r.shape[1]))
        cov_aux = np.concatenate((r.transpose(), one_matrix), axis=1)
        covE = np.concatenate((covE, cov_aux), axis=0)

        cholE = cholesky(covE, max_tries=7)
        # Posterior variance reduction from conditioning on the augmented data.
        R -= np.dot(e12, cho_solve(cholE, e12.transpose()))

        # Correction for estimating the constant mean (GLS term).
        temp = (1 - np.dot(e12, cho_solve(cholE, np.ones((n+1, 1))))) ** 2
        temp /= np.dot(np.ones((n+1, 1)).transpose(), cho_solve(cholE, np.ones((n+1, 1))))
        R += temp

        # Generalized-least-squares quadratic form of the augmented
        # observations, minus the mean-estimation component.
        aux = np.dot(M.transpose(), cho_solve(cholE, M))
        ones_vec = np.ones((n+1,1))
        aux_2 = np.dot(np.dot(M.transpose(), cho_solve(cholE, ones_vec)), ones_vec.transpose())
        aux_2 = np.dot(aux_2, cho_solve(cholE, M))
        aux_2 /= (np.dot(ones_vec.transpose(), cho_solve(cholE, ones_vec)))
        aux -= aux_2
        # Degrees-of-freedom adjusted variance contribution.
        aux += ((n - 1) / float(n - 3)) * var

        value = aux
        value *= 1.0 / (n - 2)
        value *= R
        return value
    def compute_mc_given_sample(self, sample, candidate_point, parameters_kernel):
        """
        Correct
        :param sample:
        :param candidate_point:
        :param parameters_kernel:
        :return:
        """
        if len(sample.shape) == 2:
            sample = sample[:, 0]

        n = len(sample)
        one = np.ones(2 * n)
        historical_points = self.gp.data['points']
        y = self.gp.data['evaluations']

        Z = np.concatenate((y, sample))

        cov_1 = self.gp.evaluate_cov(historical_points, parameters_kernel)


        create_vector = np.zeros(
            (historical_points.shape[0] * len(self.domain_xe), historical_points.shape[1]))

        for i in range(historical_points.shape[0]):
            for j in range(len(self.domain_xe)):
                first_part = historical_points[i][0:self.x_domain]
                point = np.concatenate((first_part, np.array(self.domain_xe[j])))
                create_vector[(i-1)*len(self.domain_xe) + j, :] = point
        cov_2 = self.gp.kernel.evaluate_cross_cov_defined_by_params(
            parameters_kernel, self.gp.data['points'], create_vector, historical_points.shape[1])
        q23 = np.dot(
            cov_2, np.kron(np.identity(n), self.weights.reshape((len(self.weights),1))))

        cov_3 = self.gp.evaluate_cov(create_vector, parameters_kernel)
        q33 = np.dot(np.kron(np.identity(n), self.weights.reshape((1, len(self.weights)))), cov_3)
        q33 = np.dot(q33, np.kron(np.identity(n), self.weights.reshape((len(self.weights),1))))

        C = np.concatenate((cov_1, q23), axis=1)
        c_aux = np.concatenate((q23.transpose(), q33), axis=1)
        C= np.concatenate((C, c_aux), axis=0)

        chol = cholesky(C, max_tries=7)

        solv = cho_solve(chol, Z.reshape((len(Z), 1)))
        solv_2 = cho_solve(chol, one.reshape((len(one), 1)))

        bc = np.dot(one, solv) / np.dot(one, solv_2)

        candidate_vector = np.zeros((len(self.domain_xe), historical_points.shape[1]))
        for j in range(len(self.domain_xe)):
            point = np.concatenate((candidate_point, np.array(self.domain_xe[j])))
            candidate_vector[j, :] = point
        cov_2 = self.gp.kernel.evaluate_cross_cov_defined_by_params(
            parameters_kernel, candidate_vector, create_vector, historical_points.shape[1])

        cov_4 = self.gp.kernel.evaluate_cross_cov_defined_by_params(
            parameters_kernel, candidate_vector, historical_points, historical_points.shape[1])

        weights_matrix = self.weights.reshape((len(self.weights),1))
        kron = np.kron(np.identity(n), weights_matrix)
        big_cov = np.concatenate((cov_4, np.dot(cov_2, kron)), axis=1)

        c = np.dot(weights_matrix.transpose(), big_cov)

        vec_1 = Z - bc * one
        part_1 = np.dot(c, cho_solve(chol, vec_1.reshape((len(vec_1), 1))))
        mc = bc + part_1
        return mc[0,0], c, chol, Z, bc
 def test_cho_solve(self):
     """cho_solve with a Cholesky factor must solve cov @ sol = y."""
     chol = cholesky(self.cov)
     y = np.linspace(1.0, 100.0, self.cov.shape[0])
     sol = cho_solve(chol, y)
     # Verify by substituting the solution back into the linear system.
     npt.assert_almost_equal(np.dot(self.cov, sol), y)