Example #1
def gpr(x,
        y,
        x_pred,
        return_std=True,
        return_kernel=False,
        exp_sine_sq_kernel=False):
    """Fit a Gaussian-process regressor to (x, y) and predict at x_pred.

    Parameters
    ----------
    x : ndarray
        Training inputs; assumed sorted so ``x[1:] - x[:-1]`` gives the
        sample spacings, and 2-D as required by ``gp.fit`` — TODO confirm.
    y : ndarray
        Training targets; may be multi-output (reduced with ``axis=0``).
    x_pred : array-like
        Prediction locations; reshaped to a single-feature column.
    return_std : bool
        If True, also return the predictive standard deviation.
    return_kernel : bool
        If True, also return the fitted kernel ``gp.kernel_``.
    exp_sine_sq_kernel : bool
        If True, use a periodic ExpSineSquared kernel instead of an RBF.

    Returns
    -------
    Prediction, optionally followed by the std and/or the fitted kernel,
    matching the original (prediction[, std][, kernel]) ordering.
    """
    dt = x[1:] - x[:-1]
    mindt = np.min(dt)
    maxdt = np.max(dt)
    minY = min(np.min(y, axis=0))
    maxY = max(np.max(y, axis=0))
    avgY = np.average(np.average(y, axis=0))
    # Data-driven hyperparameter seeds: signal variance from the target
    # magnitude, length scale from the sample spacing.
    # NOTE(review): if minY < 0, (minY**2, maxY**2) may be an inverted
    # bounds pair — inherited from the original; verify against data.
    amplitude = ConstantKernel(avgY**2, (minY**2, maxY**2))
    if exp_sine_sq_kernel:
        base = ExpSineSquared(1, 2.5*(maxdt-mindt),
                              (1.0*mindt, 10.0*maxdt),
                              periodicity_bounds=(1e-6, 1e+2))
    else:
        base = RBF(2.5*(maxdt-mindt), (1.0*mindt, 10.0*maxdt))
    kernel = amplitude * base + WhiteKernel()
    print("kernel: ", end='')
    print(kernel)
    print(kernel.get_params())
    gp = GaussianProcessRegressor(kernel, normalize_y=True)
    gp.fit(x, y)
    x_query = np.array(x_pred).reshape(-1, 1)
    if return_std:
        # predict returns (mean, std) when return_std is True
        x_predicted, s_predicted = gp.predict(x_query, return_std=True)
        result = (x_predicted, s_predicted)
    else:
        result = (gp.predict(x_query, return_std=False),)
    if return_kernel:
        result = result + (gp.kernel_,)
    # Unwrap a single value so the bare-prediction case stays a scalar/array,
    # not a 1-tuple (matches the original return shapes).
    return result if len(result) > 1 else result[0]
# for i in word_tokenize(word_token):
#     print(i)

import numpy as np
from sklearn.naive_bayes import GaussianNB

# Toy two-class dataset: three points per class in 2-D.
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
Y = np.array([1, 1, 1, 2, 2, 2])

# Full-batch fit (GaussianNB.fit returns the fitted estimator).
clf = GaussianNB().fit(X, Y)
print(clf.predict([[-0.8, -1]]))

# Same model trained incrementally; partial_fit needs the class list up front.
clf_pf = GaussianNB()
clf_pf.partial_fit(X, Y, np.unique(Y))
print(clf_pf.predict([[-0.8, -1]]))

##################################

from sklearn.gaussian_process.kernels import ConstantKernel, RBF

# Composite kernel: amplitude-scaled short RBF plus a longer-range RBF term.
kernel = (ConstantKernel(constant_value=1.0, constant_value_bounds=(0.0, 10.0))
          * RBF(length_scale=0.5, length_scale_bounds=(0.0, 10.0))
          + RBF(length_scale=2.0, length_scale_bounds=(0.0, 10.0)))

# Inspect the tunable hyperparameters of the composite.
for hyperparameter in kernel.hyperparameters:
    print(hyperparameter)

# Dump all parameters in deterministic (sorted-key) order.
for key, value in sorted(kernel.get_params().items()):
    print("%s : %s" % (key, value))

print(kernel.theta)  # Note: log-transformed
print(kernel.bounds)  # Note: log-transformed
Example #3
# History buffers for two "H" models — presumably human/oracle feedback
# observations (9 features) and labels; TODO confirm against the full script.
H_obs_ar_1 = np.zeros((1,9))
H_obs_ar_2 = np.zeros((1,9))
H_h_ar_1 = np.zeros((1))
H_h_ar_2 = np.zeros((1))

# Policy
# Two GP policies sharing one fixed kernel (optimizer=None → no hyperparameter
# fitting during training).
P_kernel =  ConstantKernel(.008)*Matern(length_scale=.4, nu=1.5) + WhiteKernel(noise_level=1e-7) 
P_gp_1 = gaussian_process.GaussianProcessRegressor(kernel=P_kernel, optimizer = None, normalize_y=False)
P_gp_2 = gaussian_process.GaussianProcessRegressor(kernel=P_kernel, optimizer = None, normalize_y=False)
# Policy observation (8 features) and action buffers, one row pre-allocated.
P_obs_ar_1 = np.zeros((1,8))
P_obs_ar_2 = np.zeros((1,8))
P_actions_1 = np.zeros((1))
P_actions_2 = np.zeros((1))

# Sparsification parameterization
# Threshold = half the kernel's prior standard deviation (sqrt of the
# ConstantKernel variance); the length-scale factor is left commented out.
thresshold_std = .5 * np.sqrt(P_kernel.get_params()['k1__k1__constant_value']) # * P_kernel.get_params()['k1__k2__length_scale'] # 1 * sqrt(Constant_kernel) * length_scale (standard deviation)


# NOTE(review): snippet is truncated — the inner step loop's body is not
# visible here.
for i_episode in range(max_num_of_episodes):

	print('Starting episode', i_episode+1)

	# Initiate environment, agent and oracle
	observation = env.reset()  

	# Cumulative reward set to 0	
	reward_c = 0.
	feedback_e_1 = 0
	feedback_e_2 = 0

	for t in range(env._max_episode_steps+1):
Example #4
class Normal_SEKernel(Kernel):
    """Kernel for a Normal pdf convolved with a squared-exponential kernel.

    :math:`\\varphi(r) = \\theta_1^2\\exp(-\\theta_2^2||x-x'||^2)
    + \\theta_3^2\\delta_{ii'}`, which is positive definite.
    """
    def __init__(self, Sigma):
        """
        Parameters
        ----------
        Sigma : ndarray
            Input-noise covariance; only ``Sigma.shape[1]`` is read here as
            the input dimension — assumed square (dim, dim), TODO confirm.
        """
        super().__init__()
        # squared exponential kernel for GP: variance * RBF + white noise
        self.GPkernel = ConstantKernel(1, (1e-3, 1e3)) * RBF(1, (1e-3, 100)) + \
                          WhiteKernel(1e-3, (1e-6, 1e-1))
        self.Sigma = Sigma
        self.dim = Sigma.shape[1]

    def updatekernel(self, kernel):
        """Replace the underlying GP kernel (e.g. after hyperparameter fitting)."""
        self.GPkernel = kernel

    def get_hyperparameters(self):
        """Return [constant_value, length_scale, noise_level] of GPkernel."""
        # Build the params dict once instead of three times.
        params = self.GPkernel.get_params()
        return np.array([params['k1__k1__constant_value'],
                         params['k1__k2__length_scale'],
                         params['k2__noise_level']])

    def _mollified_matrix(self, X, Y, sigma_factor):
        """Gram matrix of the SE kernel mollified by ``sigma_factor * Sigma``.

        Shared core of :meth:`mollifiedx1` and :meth:`mollifiedx2`:
        B = length_scale^2 * I + sigma_factor * Sigma, with amplitude rescaled
        by sqrt(det A / det B).
        """
        theta = self.get_hyperparameters()
        A = (theta[1]**2) * np.eye(self.dim)
        B = A + sigma_factor * self.Sigma
        Binv = np.linalg.inv(B)
        # amplitude correction from the convolution
        c = theta[0] * np.sqrt(np.linalg.det(A) / np.linalg.det(B))

        def pair_kernel(i, j):
            d = X[i] - Y[j]
            return c * np.exp(-0.5 * d @ Binv @ d)

        # vectorize + fromfunction evaluates pair_kernel over all index pairs
        g = np.vectorize(pair_kernel)
        return np.fromfunction(g, (len(X), len(Y)), dtype=int)

    def mollifiedx1(self, X, Y):
        """Once-mollified kernel matrix between X and Y."""
        return self._mollified_matrix(X, Y, 1)

    def mollifiedx2(self, X):
        """Twice-mollified kernel matrix on X, plus the white-noise diagonal."""
        theta = self.get_hyperparameters()
        K = self._mollified_matrix(X, X, 2)
        return K + theta[2] * np.eye(len(X))