Example #1
0
    def __init__(self,
                 X,
                 y,
                 Xtest,
                 kernel='Matern52',
                 lengthscale=None,
                 lengthscale_init=None,
                 iterations=50,
                 learning_rate=.1,
                 grid_points_ratio=1.,
                 maxroot=100,
                 num_batches=10,
                 calculate_sd=0,
                 use_gpu=1,
                 verbose=0,
                 seed=0):
        """
        Initiates reconstructor parameters
        and pre-processes training and test data arrays.

        The kernel input dimensionality is inferred from the rank of *y*.
        When ``use_gpu`` is truthy and CUDA is available, data tensors and
        the model are moved to the GPU, double precision becomes the torch
        default, and Toeplitz structure is switched off; on CPU the torch
        default is set to double precision and Toeplitz stays enabled.
        """
        # Seed torch's CPU RNG for reproducibility
        torch.manual_seed(seed)
        # Kernel input dimensionality equals the rank of the observation array
        input_dim = np.ndim(y)
        # Convert raw arrays into model-ready training/test tensors
        X, y = gprutils.prepare_training_data(X, y)
        Xtest = gprutils.prepare_test_data(Xtest)
        self.X, self.y, self.Xtest = X, y, Xtest
        # gpytorch settings objects; presumably entered as context managers
        # during training/prediction — confirm at the call sites
        self.toeplitz = gpytorch.settings.use_toeplitz(True)
        self.maxroot = gpytorch.settings.max_root_decomposition_size(maxroot)
        if use_gpu and torch.cuda.is_available():
            torch.cuda.empty_cache()
            # Make CUDA runs deterministic for the given seed
            torch.cuda.manual_seed_all(seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
            torch.set_default_tensor_type(torch.cuda.DoubleTensor)
            self.X, self.y = self.X.cuda(), self.y.cuda()
            self.Xtest = self.Xtest.cuda()
            # Toeplitz algebra is disabled when running on GPU
            self.toeplitz = gpytorch.settings.use_toeplitz(False)
        else:
            torch.set_default_tensor_type(torch.DoubleTensor)
        self.likelihood = gpytorch.likelihoods.GaussianLikelihood()
        _kernel = get_kernel(kernel,
                             input_dim,
                             use_gpu,
                             lengthscale=lengthscale,
                             lengthscale_init=lengthscale_init)
        # Structured-kernel GP model built over the prepared training data
        self.model = skgprmodel(self.X, self.y, _kernel, self.likelihood,
                                input_dim, grid_points_ratio)
        if use_gpu:
            self.model.cuda()
        self.iterations = iterations
        self.num_batches = num_batches
        self.calculate_sd = calculate_sd
        self.lr = learning_rate

        # Hyperparameter traces; presumably appended to during training
        self.lscales, self.noise_all = [], []
        self.hyperparams = {
            "lengthscale": self.lscales,
            "noise": self.noise_all,
        }
        self.verbose = verbose
Example #2
0
 def update_posterior(self):
     """
     Refreshes the GP posterior by re-training the surrogate model
     on the current sparse training set.
     """
     # Convert the current sparse measurements into training tensors
     X_new, y_new = gprutils.prepare_training_data(
         self.X_sparse, self.y_sparse)
     # Mirror the reconstructor's device placement
     if self.use_gpu and torch.cuda.is_available():
         X_new = X_new.cuda()
         y_new = y_new.cuda()
     model = self.surrogate_model.model
     model.X = X_new
     model.y = y_new
     # Re-run training so the posterior reflects the updated data
     self.surrogate_model.train(verbose=self.verbose)
Example #3
0
    def __init__(self,
                 X,
                 y,
                 Xtest=None,
                 kernel='RBF',
                 lengthscale=None,
                 ski=True,
                 learning_rate=.1,
                 iterations=50,
                 use_gpu=1,
                 verbose=1,
                 seed=0,
                 **kwargs):
        """
        Initiates reconstructor parameters
        and pre-processes training and test data arrays.

        Recognized **kwargs (as read below): "precision"
        ('single'/'double'), "maxroot", "isotropic", "n_mixtures",
        "grid_points_ratio", "num_batches".
        """
        # Select float precision for all tensors ('single' -> float32)
        self.precision = kwargs.get("precision", "double")
        if self.precision == 'single':
            self.tensor_type = torch.FloatTensor
            self.tensor_type_gpu = torch.cuda.FloatTensor
        else:
            self.tensor_type = torch.DoubleTensor
            self.tensor_type_gpu = torch.cuda.DoubleTensor
        torch.manual_seed(seed)
        if use_gpu and torch.cuda.is_available():
            torch.cuda.empty_cache()
            # Make CUDA runs deterministic for the given seed
            torch.cuda.manual_seed_all(seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
            torch.set_default_tensor_type(self.tensor_type_gpu)
        # Kernel input dimensionality equals the rank of the observation array
        input_dim = np.ndim(y)
        # Grid dimensions of the full (test or training) data volume;
        # presumably used later to reshape predictions — confirm at call site
        if Xtest is not None:
            self.fulldims = Xtest.shape[1:]
        else:
            self.fulldims = X.shape[1:]
        X, y = gprutils.prepare_training_data(X, y, precision=self.precision)
        if Xtest is not None:
            Xtest = gprutils.prepare_test_data(Xtest, precision=self.precision)
        self.X, self.y, self.Xtest = X, y, Xtest
        self.do_ski = ski
        # Structured kernel interpolation is never used with spectral kernels
        if kernel == "Spectral":
            self.do_ski = False
        # gpytorch settings objects; presumably entered as context managers
        # during training/prediction
        self.toeplitz = gpytorch.settings.use_toeplitz(True)
        maxroot = kwargs.get("maxroot", 100)
        self.maxroot = gpytorch.settings.max_root_decomposition_size(maxroot)
        if use_gpu and torch.cuda.is_available():
            self.X, self.y = self.X.cuda(), self.y.cuda()
            if self.Xtest is not None:
                self.Xtest = self.Xtest.cuda()
            # Toeplitz algebra is disabled when running on GPU
            self.toeplitz = gpytorch.settings.use_toeplitz(False)
        else:
            torch.set_default_tensor_type(self.tensor_type)
        self.likelihood = gpytorch.likelihoods.GaussianLikelihood()
        isotropic = kwargs.get("isotropic")
        n_mixtures = kwargs.get("n_mixtures")
        _kernel = gpytorch_kernels.get_kernel(kernel,
                                              input_dim,
                                              use_gpu,
                                              lengthscale=lengthscale,
                                              isotropic=isotropic,
                                              precision=self.precision,
                                              n_mixtures=n_mixtures)
        grid_points_ratio = kwargs.get("grid_points_ratio", 1.)
        self.model = skgprmodel(self.X, self.y, _kernel, self.likelihood,
                                input_dim, grid_points_ratio, self.do_ski)
        if use_gpu:
            self.model.cuda()
        self.iterations = iterations
        self.num_batches = kwargs.get("num_batches", 1)
        self.learning_rate = learning_rate

        # Hyperparameter traces; spectral-mixture kernels track mixture
        # scales/means/weights instead of a single lengthscale
        self.noise_all = []
        if kernel == "Spectral":
            self.scales, self.means, self.weights = [], [], []
            self.hyperparams = {
                "scales": self.scales,
                "means": self.means,
                "weights": self.weights,
                "noise": self.noise_all,
                "maxdim": max(self.fulldims)
            }
        else:
            self.lscales = []
            self.hyperparams = {
                "lengthscale": self.lscales,
                "noise": self.noise_all,
            }
        self.verbose = verbose
Example #4
0
 def __init__(self,
              X,
              y,
              Xtest=None,
              kernel='RBF',
              lengthscale=None,
              sparse=False,
              indpoints=None,
              learning_rate=5e-2,
              iterations=1000,
              use_gpu=False,
              verbose=1,
              seed=0,
              **kwargs):
     """
     Initiates reconstructor parameters
     and pre-processes training and test data arrays.

     Recognized **kwargs (as read below): "precision"
     ('single'/'double'), "isotropic", "amplitude".
     Builds a plain GPRegression model, or a SparseGPRegression
     model with evenly subsampled inducing points when ``sparse``.
     """
     # Select float precision for all tensors ('single' -> float32)
     self.precision = kwargs.get("precision", "double")
     if self.precision == 'single':
         self.tensor_type = torch.FloatTensor
         self.tensor_type_gpu = torch.cuda.FloatTensor
     else:
         self.tensor_type = torch.DoubleTensor
         self.tensor_type_gpu = torch.cuda.DoubleTensor
     self.verbose = verbose
     # Seed every RNG involved (torch + pyro) for reproducibility
     torch.manual_seed(seed)
     pyro.set_rng_seed(seed)
     pyro.clear_param_store()
     if use_gpu and torch.cuda.is_available():
         torch.cuda.empty_cache()
         torch.cuda.manual_seed_all(seed)
         torch.backends.cudnn.deterministic = True
         torch.backends.cudnn.benchmark = False
         torch.set_default_tensor_type(self.tensor_type_gpu)
         use_gpu = True
     else:
         torch.set_default_tensor_type(self.tensor_type)
         # Normalize the flag: GPU may have been requested but unavailable
         use_gpu = False
     # Kernel input dimensionality equals the rank of the observation array
     input_dim = np.ndim(y)
     self.X, self.y = gprutils.prepare_training_data(
         X, y, precision=self.precision)
     self.do_sparse = sparse
     # Default lengthscale constraints: [lower bounds, upper bounds]
     if lengthscale is None and not kwargs.get("isotropic"):
         lengthscale = [[0. for _ in range(input_dim)],
                        [np.mean(y.shape) / 2 for _ in range(input_dim)]
                        ]  # TODO Make separate lscale for each dim
     elif lengthscale is None and kwargs.get("isotropic"):
         lengthscale = [0., np.mean(y.shape) / 2]
     kernel = pyro_kernels.get_kernel(kernel,
                                      input_dim,
                                      lengthscale,
                                      use_gpu,
                                      amplitude=kwargs.get('amplitude'),
                                      precision=self.precision)
     # Grid dimensions of the full (test or training) data volume
     if Xtest is not None:
         self.fulldims = Xtest.shape[1:]
     else:
         self.fulldims = X.shape[1:]
     if Xtest is not None:
         self.Xtest = gprutils.prepare_test_data(Xtest,
                                                 precision=self.precision)
     else:
         self.Xtest = Xtest
     if use_gpu:
         self.X = self.X.cuda()
         self.y = self.y.cuda()
         if self.Xtest is not None:
             self.Xtest = self.Xtest.cuda()
     if not self.do_sparse:
         self.model = gp.models.GPRegression(self.X, self.y, kernel)
     else:
         # Number of inducing points: ~10% of the data by default (at
         # least 1); user-specified values are clipped to the dataset size
         if indpoints is None:
             indpoints = len(self.X) // 10
             indpoints = indpoints + 1 if indpoints == 0 else indpoints
         else:
             indpoints = len(
                 self.X) if indpoints > len(self.X) else indpoints
         # Evenly subsample the training inputs as inducing points
         Xu = self.X[::len(self.X) // indpoints]
         if self.verbose == 2:
             print(
                 "# of inducing points for sparse GP regression: {}".format(
                     len(Xu)))
         self.model = gp.models.SparseGPRegression(self.X,
                                                   self.y,
                                                   kernel,
                                                   Xu,
                                                   jitter=1.0e-5)
     if use_gpu:
         self.model.cuda()
     self.learning_rate = learning_rate
     self.iterations = iterations
     # Hyperparameter traces; presumably appended to during training.
     # (The original assigned self.hyperparams = {} here only to overwrite
     # it immediately below — that dead assignment is removed.)
     self.indpoints_all = []
     self.lscales, self.noise_all, self.amp_all = [], [], []
     self.hyperparams = {
         "lengthscale": self.lscales,
         "noise": self.noise_all,
         "variance": self.amp_all,
         "inducing_points": self.indpoints_all
     }
Example #5
0
    def __init__(self,
                 X,
                 y,
                 Xtest=None,
                 kernel='RBF',
                 lengthscale=None,
                 independent=False,
                 learning_rate=.1,
                 iterations=50,
                 use_gpu=1,
                 verbose=1,
                 seed=0,
                 **kwargs):
        """
        Initiates reconstructor parameters
        and pre-processes training and test data arrays.

        Vector-valued (multitask) variant: the last axis of *y* holds the
        tasks, so the kernel input dimensionality excludes it. Recognized
        **kwargs (as read below): "precision" ('single'/'double'),
        "maxroot", "isotropic", "num_batches".
        """
        # Select float precision for all tensors ('single' -> float32)
        self.precision = kwargs.get("precision", "double")
        if self.precision == 'single':
            self.tensor_type = torch.FloatTensor
            self.tensor_type_gpu = torch.cuda.FloatTensor
        else:
            self.tensor_type = torch.DoubleTensor
            self.tensor_type_gpu = torch.cuda.DoubleTensor
        torch.manual_seed(seed)
        if use_gpu and torch.cuda.is_available():
            torch.cuda.empty_cache()
            # Make CUDA runs deterministic for the given seed
            torch.cuda.manual_seed_all(seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
            torch.set_default_tensor_type(self.tensor_type_gpu)
        # Last axis of y is the task axis, so it is excluded from the
        # kernel input dimensionality
        input_dim = np.ndim(y) - 1
        X, y = gprutils.prepare_training_data(X,
                                              y,
                                              vector_valued=True,
                                              precision=self.precision)
        # Number of output tasks = size of the last axis of prepared y
        num_tasks = y.shape[-1]
        # Grid dimensions of the full data volume, task axis appended
        if Xtest is not None:
            self.fulldims = Xtest.shape[1:] + (num_tasks, )
        else:
            self.fulldims = X.shape[1:] + (num_tasks, )
        if Xtest is not None:
            Xtest = gprutils.prepare_test_data(Xtest, precision=self.precision)
        self.X, self.y, self.Xtest = X, y, Xtest
        # gpytorch settings objects; presumably entered as context managers
        # during training/prediction
        self.toeplitz = gpytorch.settings.use_toeplitz(True)
        maxroot = kwargs.get("maxroot", 100)
        self.maxroot = gpytorch.settings.max_root_decomposition_size(maxroot)
        if use_gpu and torch.cuda.is_available():
            self.X, self.y = self.X.cuda(), self.y.cuda()
            if self.Xtest is not None:
                self.Xtest = self.Xtest.cuda()
            # Toeplitz algebra is disabled when running on GPU
            self.toeplitz = gpytorch.settings.use_toeplitz(False)
        else:
            torch.set_default_tensor_type(self.tensor_type)
        self.likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(
            num_tasks)
        isotropic = kwargs.get("isotropic")
        _kernel = gpytorch_kernels.get_kernel(kernel,
                                              input_dim,
                                              use_gpu,
                                              lengthscale=lengthscale,
                                              isotropic=isotropic,
                                              precision=self.precision)

        # Correlated-task model by default; independent per-task model
        # when requested
        if not independent:
            self.model = vgprmodel(self.X, self.y, _kernel, self.likelihood,
                                   num_tasks)
        else:
            self.model = ivgprmodel(self.X, self.y, _kernel, self.likelihood,
                                    num_tasks)

        if use_gpu:
            self.model.cuda()
        self.iterations = iterations
        self.num_batches = kwargs.get("num_batches", 1)
        self.learning_rate = learning_rate
        self.independent = independent
        self.lscales = []
        self.hyperparams = {
            "lengthscale": self.lscales,  # need to add noise as well
        }
        self.verbose = verbose
Example #6
0
File: gpr.py Project: MaggieX/GPim
 def __init__(self,
              X,
              y,
              Xtest,
              kernel,
              lengthscale=None,
              indpoints=1000,
              learning_rate=5e-2,
              iterations=1000,
              use_gpu=False,
              verbose=False,
              seed=0,
              **kwargs):
     """
     Initiates reconstructor parameters
     and pre-processes training and test data arrays.

     Always builds a SparseGPRegression model with inducing points
     evenly subsampled from the training inputs. Recognized **kwargs
     (as read below): "amplitude".
     """
     # Seed every RNG involved (torch + pyro) for reproducibility
     torch.manual_seed(seed)
     pyro.set_rng_seed(seed)
     pyro.clear_param_store()
     if use_gpu and torch.cuda.is_available():
         torch.cuda.empty_cache()
         torch.cuda.manual_seed_all(seed)
         torch.backends.cudnn.deterministic = True
         torch.backends.cudnn.benchmark = False
         torch.set_default_tensor_type(torch.cuda.DoubleTensor)
         use_gpu = True
     else:
         torch.set_default_tensor_type(torch.DoubleTensor)
         # Normalize the flag: GPU may have been requested but unavailable
         use_gpu = False
     # Kernel input dimensionality equals the rank of the observation array
     input_dim = np.ndim(y)
     self.X, self.y = gprutils.prepare_training_data(X, y)
     # Clip the number of inducing points to the dataset size
     if indpoints > len(self.X):
         indpoints = len(self.X)
     # Evenly subsample the training inputs as inducing points
     Xu = self.X[::len(self.X) // indpoints]
     # Default lengthscale constraints: [lower bounds, upper bounds]
     if lengthscale is None:
         lengthscale = [[0. for _ in range(input_dim)],
                        [np.mean(y.shape) / 2 for _ in range(input_dim)]]
     kernel = get_kernel(kernel, input_dim,
                         lengthscale, use_gpu,
                         amplitude=kwargs.get('amplitude'))
     # Grid dimensions of the full prediction volume
     self.fulldims = Xtest.shape[1:]
     self.Xtest = gprutils.prepare_test_data(Xtest)
     if use_gpu:
         self.X = self.X.cuda()
         self.y = self.y.cuda()
         self.Xtest = self.Xtest.cuda()
     self.sgpr = gp.models.SparseGPRegression(
         self.X, self.y, kernel, Xu, jitter=1.0e-5)
     print("# of inducing points for GP regression: {}".format(len(Xu)))
     if use_gpu:
         self.sgpr.cuda()
     self.learning_rate = learning_rate
     self.iterations = iterations
     # Hyperparameter traces; presumably appended to during training.
     # (The original assigned self.hyperparams = {} here only to overwrite
     # it immediately below — that dead assignment is removed.)
     self.indpoints_all = []
     self.lscales, self.noise_all, self.amp_all = [], [], []
     self.hyperparams = {
         "lengthscale": self.lscales,
         "noise": self.noise_all,
         "variance": self.amp_all,
         "inducing_points": self.indpoints_all
     }
     self.verbose = verbose
Example #7
0
# Presumably marks the sample's edge points as measured so the GP has
# anchor points at the boundary — confirm against gprutils.open_edge_points
R = gprutils.open_edge_points(R, R_true)
# Get sparse and full grid indices
X = gprutils.get_sparse_grid(R)
X_true = gprutils.get_full_grid(R)
dist_edge = [0, 0]  # set to non-zero vals when edge points are not "opened"
# Construct lengthscale constraints for all 3 dimensions
# (one [min, max] pair per dimension, taken from the CLI arguments)
LENGTH_CONSTR = [[float(args.LENGTH_CONSTR_MIN) for i in range(3)],
                 [float(args.LENGTH_CONSTR_MAX) for i in range(3)]]
# Run exploratory analysis: accumulators for per-step results
uncert_idx_all, uncert_val_all, mean_all, sd_all, R_all = [], [], [], [], []
if not os.path.exists(args.SAVEDIR): os.makedirs(args.SAVEDIR)
indpts_r = args.INDUCING_POINTS_RATIO
for i in range(args.ESTEPS):
    print('Exploration step {}/{}'.format(i, args.ESTEPS))
    # Make the number of inducing points dependent on the number of datapoints
    indpoints = len(gprutils.prepare_training_data(X, R)[0]) // indpts_r
    # clip to make sure it fits into GPU memory
    indpoints = 2000 if indpoints > 2000 else indpoints
    # Initialize explorer (a fresh reconstructor is built every step)
    bexplorer = gpr.reconstructor(X,
                                  R,
                                  X_true,
                                  args.KERNEL,
                                  LENGTH_CONSTR,
                                  indpoints,
                                  args.LEARNING_RATE,
                                  args.STEPS,
                                  use_gpu=args.USE_GPU)
    # get indices/value of a max uncertainty point
    uncert_idx, uncert_val, mean, sd = bexplorer.step(dist_edge)
    # some safeguards (to not stuck at one point)