def _kern():
    # Note: priors, tec_kern_time_ls, tec_kern_var and tec_kern_dir_ls are
    # closed over from the enclosing scope; each *_ls / *_var is a
    # (mu, sigma) pair parameterising a log-normal prior.
    kern_thin_layer = ThinLayer(np.array([0., 0., 0.]), priors['tec_scale'],
                                active_dims=slice(2, 6, 1))
    kern_time = Matern32(1, active_dims=slice(6, 7, 1))
    kern_dir = Matern32(2, active_dims=slice(0, 2, 1))

    ###
    # time kern
    kern_time.lengthscales = np.exp(tec_kern_time_ls[0])
    kern_time.lengthscales.prior = LogNormal(tec_kern_time_ls[0],
                                             tec_kern_time_ls[1]**2)
    kern_time.lengthscales.set_trainable(True)
    kern_time.variance = 1.  # np.exp(tec_kern_var[0])
    # kern_time.variance.prior = LogNormal(tec_kern_var[0], tec_kern_var[1]**2)
    kern_time.variance.set_trainable(False)

    ###
    # directional kern
    kern_dir.variance = np.exp(tec_kern_var[0])
    kern_dir.variance.prior = LogNormal(tec_kern_var[0], tec_kern_var[1]**2)
    kern_dir.variance.set_trainable(True)
    kern_dir.lengthscales = np.exp(tec_kern_dir_ls[0])
    kern_dir.lengthscales.prior = LogNormal(tec_kern_dir_ls[0],
                                            tec_kern_dir_ls[1]**2)
    kern_dir.lengthscales.set_trainable(True)

    # kern_thin_layer is unused in the active configuration below.
    kern = kern_dir * kern_time  # (kern_thin_layer + kern_dir) * kern_time
    return kern
def init_kern_act(num_pitches):
    """Initialize one Matern-3/2 activation kernel per pitch."""
    kern_act = []
    for _ in range(num_pitches):
        kern_act.append(Matern32(1, lengthscales=1.0, variance=3.5))
    return kern_act
def main():
    dataname = "1DGP_MaternCombo1"
    ptr = "data/toy_data/" + dataname + '/'
    n_functions = 1000
    kernel = Matern52(variance=1.0, lengthscales=2.0) + Matern32(
        variance=2.0, lengthscales=1.0)
    data_generator = GPDataGenerator(kernel=kernel)

    x_trains = []
    y_trains = []
    x_tests = []
    y_tests = []

    # Generate n_functions sets of x values in the range [x_min, x_max],
    # to be used for training and testing.
    plt.figure()
    for i in range(n_functions):
        # Every (n_functions // 5)-th function gets a small training set.
        if i % (n_functions // 5) == 0:
            n_train = np.random.randint(low=5, high=10)
            n_test = 20
        else:
            n_train = np.random.randint(low=25, high=100)
            n_test = int(0.2 * n_train)
        x_train, y_train, x_test, y_test = data_generator.sample(
            train_size=n_train, test_size=n_test, x_min=-3, x_max=3)
        x_trains.append(x_train)
        y_trains.append(y_train)
        x_tests.append(x_test)
        y_tests.append(y_test)
        if i == 0:
            plt.scatter(x_train, y_train, c='r', s=1, label="train")
            plt.scatter(x_test, y_test, c="magenta", s=1, label="test")
        elif i == 1:
            plt.scatter(x_train, y_train, c='black', s=1, label="train")
            plt.scatter(x_test, y_test, c="yellow", s=1, label="test")
        elif i == 2:
            plt.scatter(x_train, y_train, c='b', s=1, label="train")
            plt.scatter(x_test, y_test, c="g", s=1, label="test")
    plt.legend()
    plt.xlabel("x")
    plt.xticks([])
    plt.ylabel('f(x)')
    plt.yticks([])
    plt.show()

    # The per-function sample sizes vary, so these are ragged (object) arrays.
    x_trains = np.array(x_trains, dtype=object)
    y_trains = np.array(y_trains, dtype=object)
    x_tests = np.array(x_tests, dtype=object)
    y_tests = np.array(y_tests, dtype=object)
    np.save(ptr + dataname + "_X_trains.npy", x_trains)
    np.save(ptr + dataname + "_y_trains.npy", y_trains)
    np.save(ptr + dataname + "_X_tests.npy", x_tests)
    np.save(ptr + dataname + "_y_tests.npy", y_tests)
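# Loading the saved arrays back (a minimal sketch, assuming the paths above):
# because the arrays are ragged object arrays, np.save pickles them, so
# np.load must be called with allow_pickle=True.
import numpy as np

dataname = "1DGP_MaternCombo1"
ptr = "data/toy_data/" + dataname + '/'
x_trains = np.load(ptr + dataname + "_X_trains.npy", allow_pickle=True)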
def test_gaussian_mean_and_variance(Ntrain, Ntest, D):
    data = rng.randn(Ntrain, D), rng.randn(Ntrain, 1)
    Xtest, _ = rng.randn(Ntest, D), rng.randn(Ntest, 1)
    kernel = Matern32() + gpflow.kernels.White()
    model_gp = gpflow.models.GPR(data, kernel=kernel)

    mu_f, var_f = model_gp.predict_f(Xtest)
    mu_y, var_y = model_gp.predict_y(Xtest)

    assert np.allclose(mu_f, mu_y)
    # predict_y adds the Gaussian likelihood noise variance (1.0 by default)
    # to the latent-function variance.
    assert np.allclose(var_f, var_y - 1.)
def test_gaussian_full_cov_samples(input_dim, output_dim, N, Ntest, M,
                                   num_samples):
    samples_shape = (num_samples, Ntest, output_dim)
    X, Y, _ = rng.randn(N, input_dim), rng.randn(N, output_dim), rng.randn(
        M, input_dim)
    Xtest = rng.randn(Ntest, input_dim)
    kernel = Matern32()
    model_gp = gpflow.models.GPR([X, Y], kernel=kernel)
    samples = model_gp.predict_f_samples(Xtest, num_samples)
    assert samples.shape == samples_shape
def test_gaussian_log_density(Ntrain, Ntest, D):
    data = rng.randn(Ntrain, D), rng.randn(Ntrain, 1)
    Xtest, Ytest = rng.randn(Ntest, D), rng.randn(Ntest, 1)
    kernel = Matern32() + gpflow.kernels.White()
    model_gp = gpflow.models.GPR(data, kernel=kernel)
    mu_y, var_y = model_gp.predict_y(Xtest)

    data = Xtest, Ytest
    log_density = model_gp.predict_log_density(data)
    # Hand-computed Gaussian log density: log N(Ytest; mu_y, var_y).
    log_density_hand = (-0.5 * np.log(2 * np.pi) - 0.5 * np.log(var_y) -
                        0.5 * np.square(mu_y - Ytest) / var_y)
    assert np.allclose(log_density_hand, log_density)
def _build_kernel(self, kern_dir_ls=0.3, kern_time_ls=50., kern_var=1.,
                  include_time=True, include_dir=True, **priors):
    kern_var = 1. if kern_var == 0. else kern_var

    kern_dir = Matern32(2, active_dims=slice(0, 2, 1))
    kern_dir.variance.trainable = False
    kern_dir.lengthscales = kern_dir_ls
    kern_dir_ls = log_normal_solve(kern_dir_ls, 0.5 * kern_dir_ls)
    kern_dir.lengthscales.prior = LogNormal(kern_dir_ls[0], kern_dir_ls[1]**2)
    kern_dir.lengthscales.trainable = False  # True

    kern_time = Matern32(1, active_dims=slice(2, 3, 1))
    kern_time.variance = kern_var
    kern_var = log_normal_solve(kern_var, 0.5 * kern_var)
    kern_time.variance.prior = LogNormal(kern_var[0], kern_var[1]**2)
    kern_time.variance.trainable = False  # True
    kern_time.lengthscales = kern_time_ls
    kern_time_ls = log_normal_solve(kern_time_ls, 0.5 * kern_time_ls)
    kern_time.lengthscales.prior = LogNormal(kern_time_ls[0],
                                             kern_time_ls[1]**2)
    kern_time.lengthscales.trainable = False  # True

    kern_white = gp.kernels.White(3)
    kern_white.variance = 1.
    # The original re-set kern_time.variance.trainable here, which looks like
    # a copy-paste slip; kern_white's variance is the one meant to be frozen.
    kern_white.variance.trainable = False

    if include_time:
        if include_dir:
            return kern_dir * kern_time
        return kern_time
    else:
        if include_dir:
            kern_dir.variance.trainable = True
            return kern_dir
        return kern_white
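# log_normal_solve is used above (and in _kern) but not defined in this
# excerpt. A moment-matching sketch consistent with its call sites, which
# expect a (mu, sigma) pair for a LogNormal prior -- an assumption, not the
# verified original:
import numpy as np

def log_normal_solve(mean, std):
    """Return (mu, sigma) of the log-normal with the given mean and std."""
    mu = np.log(mean**2 / np.sqrt(std**2 + mean**2))
    sigma = np.sqrt(np.log(1. + std**2 / mean**2))
    return mu, sigma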
def test_gaussian_full_cov(input_dim, output_dim, N, Ntest, M):
    covar_shape = (output_dim, Ntest, Ntest)
    X, Y, Z = rng.randn(N, input_dim), rng.randn(N, output_dim), rng.randn(
        M, input_dim)
    Xtest = rng.randn(Ntest, input_dim)
    kernel = Matern32()
    model_gp = gpflow.models.GPR([X, Y], kernel=kernel)
    mu1, var = model_gp.predict_f(Xtest, full_cov=False)
    mu2, covar = model_gp.predict_f(Xtest, full_cov=True)
    assert np.allclose(mu1, mu2, atol=1.e-10)
    assert covar.shape == covar_shape
    assert var.shape == (Ntest, output_dim)
    for i in range(output_dim):
        assert np.allclose(var[:, i], np.diag(covar[i, :, :]))
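# The four GPR tests above rely on shared scaffolding that is not part of
# this excerpt; a minimal sketch, assuming pytest parametrization (the
# parameter values are illustrative, not the originals):
import numpy as np
import pytest

rng = np.random.RandomState(0)

# Each test would then be decorated along the lines of:
# @pytest.mark.parametrize("Ntrain, Ntest, D", [(100, 10, 2)])
# @pytest.mark.parametrize("input_dim, output_dim, N, Ntest, M", [(3, 2, 20, 30, 5)])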
def __init__(self,
             model_class,
             kernel=Matern32(),
             likelihood=gpflow.likelihoods.Gaussian(),
             whiten=None,
             q_diag=None,
             requires_inducing_variables=True,
             requires_data=False,
             requires_likelihood=True):
    self.model_class = model_class
    self.kernel = kernel
    self.likelihood = likelihood
    self.whiten = whiten
    self.q_diag = q_diag
    self.requires_inducing_variables = requires_inducing_variables
    self.requires_data = requires_data
    self.requires_likelihood = requires_likelihood
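# Usage sketch, assuming this __init__ belongs to a test-helper class that
# records how each model type should be constructed ('ModelSetup' is a
# hypothetical name; the enclosing class is not shown in this excerpt):
# svgp_setup = ModelSetup(gpflow.models.SVGP, whiten=True, q_diag=False)
# gpr_setup = ModelSetup(gpflow.models.GPR,
#                        requires_inducing_variables=False,
#                        requires_data=True,
#                        requires_likelihood=False)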
def init_kern(num_pitches, energy, frequency):
    """Initialize kernels for activations and components."""
    k_act, k_com = [], []
    k_com_a, k_com_b = [], []
    for i in range(num_pitches):
        k_act.append(Matern32(1, lengthscales=0.25, variance=3.5))

        k_com_a.append(Matern52(1, lengthscales=0.25, variance=1.0))
        k_com_a[i].variance.fixed = True  # GPflow 1.x API
        k_com_a[i].lengthscales.transform = gpflow.transforms.Logistic(0., 0.5)

        k_com_b.append(
            MercerCosMix(input_dim=1,
                         energy=energy[i].copy(),
                         frequency=frequency[i].copy(),
                         variance=0.25,
                         features_as_params=False))
        k_com_b[i].fixed = True

        k_com.append(k_com_a[i] * k_com_b[i])
    kern = [k_act, k_com]
    return kern
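# Usage sketch for the pitch-kernel initializers above (GPflow 1.x API,
# judging by the .fixed attributes and gpflow.transforms; the arguments are
# illustrative -- energy and frequency would be per-pitch spectral-template
# arrays):
# k_act = init_kern_act(num_pitches=3)
# k_act, k_com = init_kern(num_pitches=3, energy=energy, frequency=frequency)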
def __init__(self, h: Bandwidth, variance: float):
    super().__init__(name="Matern32")
    self.kernel = Matern32(variance=variance)
    self.h = h
#####################################
###### Test Dataset Parameters ######
#####################################

ip = 0.        # Intervention point
dc = 1.0       # Discontinuity
sigma = 0.5    # Standard deviation
sigma_d = 0.   # Value added to the standard deviation after the intervention point
n = 20         # Number of data points

############################
###### Kernel Options ######
############################

Matern = Matern32()
linear_kernel = Linear() + Constant()  # "Linear" kernel
exp_kernel = Exponential()
RBF_kernel = SquaredExponential()

kernel_names = ['Linear', 'Exponential', 'Gaussian', 'Matern', 'BMA']
kernels = [linear_kernel, exp_kernel, RBF_kernel, Matern]
# Map kernel names to their corresponding kernels. Note that zip() stops at
# the shorter list, so 'BMA' gets no entry here (it is presumably handled
# separately as Bayesian model averaging over the other kernels).
kernel_dict = dict(zip(kernel_names, kernels))

###########################################
######  Generation of Test Dataset   ######
###########################################
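# Usage sketch (assumed, not from the original excerpt): look up one kernel
# by name; 'Gaussian' maps to the SquaredExponential instance defined above.
kernel = kernel_dict['Gaussian']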
class Container(dict, tf.Module):
    def __init__(self):
        super().__init__()

res = {
    'ckv': list(),
    'ckl': list(),
    'clv': list(),
    'dkv': list(),
    'dkl': list(),
    'dlv': list()
}
kernels = [Linear() + Constant(), RBF(), Matern32(), Exponential()]

SHOW_PLOTS = 1
epochs = 5
container = Container()

for e in range(epochs):
    print(f'epoch {e}')
    np.random.seed(e)
    '''
    n = 100  # Number of data points
    x = np.linspace(-3, 3, n)  # Evenly distributed x values