Example #1
    def __init__(self, train_x, train_y, likelihood, input_dim, params):
        super(ExactGPR, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel(
                ard_num_dims=input_dim,
                lengthscale_constraint=LessThan(params[0])),
            outputscale_constraint=GreaterThan(params[1]))
Example #2
    def __init__(self, m, **kwargs):
        # self.m = m
        scale_constraint = LessThan(0.1)
        super(RBFConstraint, self).__init__(
            lengthscale_constraint=scale_constraint, **kwargs)
        outputscale = torch.zeros(
            *self.batch_shape) if len(self.batch_shape) else torch.tensor(0.0)
        self.register_parameter(name="raw_outputscale",
                                parameter=torch.nn.Parameter(outputscale))
        outputscale_constraint = Positive()
        self.register_constraint("raw_outputscale", outputscale_constraint)
        self.register_buffer("m", torch.tensor(m))
Example #3
    def __init__(self, train_x, train_y, likelihood, input_dim, params):
        super(SparseGPR, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel(
                ard_num_dims=input_dim,
                lengthscale_constraint=LessThan(params[0])),
            outputscale_constraint=GreaterThan(params[1]))
        # use some training data to initialize the inducing_module
        if train_x is None:
            train_x = CUDA(torch.zeros((1, input_dim)))
        self.inducing_module = gpytorch.kernels.InducingPointKernel(
            self.covar_module, inducing_points=train_x, likelihood=likelihood)
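
Example #3 only shows the constructor; the corresponding forward pass would use the InducingPointKernel rather than the exact covariance module. A minimal sketch (assuming the usual ExactGP layout for this class, not taken from the original source) might be:

    def forward(self, x):
        mean_x = self.mean_module(x)
        # the inducing-point kernel approximates the constrained RBF covariance
        covar_x = self.inducing_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
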
Example #4
    def __init__(self, train_x, train_y, likelihood, input_dim, params):
        super(ExactGPR, self).__init__(train_x, train_y, likelihood)
        self.params = params
        self.input_dim = input_dim
        self.lengthscale_prior = None  #gpytorch.priors.GammaPrior(3.0, 6.0)
        self.outputscale_prior = None  #gpytorch.priors.GammaPrior(2.0, 0.15)

        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel(
                ard_num_dims=input_dim,
                lengthscale_prior=self.lengthscale_prior,
                lengthscale_constraint=LessThan(self.params[4])),
            outputscale_prior=self.outputscale_prior,
            outputscale_constraint=GreaterThan(self.params[5]))
Example #5
    def __init__(self, train_x, train_y, likelihood, input_dim, params):
        super(SampleGPR, self).__init__(train_x, train_y, likelihood)
        self.params = params
        self.input_dim = input_dim
        self.lengthscale_prior = None  #gpytorch.priors.GammaPrior(3.0, 6.0)
        self.outputscale_prior = None  #gpytorch.priors.GammaPrior(2.0, 0.15)

        self.gp_input_dim = input_dim
        #self.feature_extractor = FeatureExtractor(input_dim, self.gp_input_dim)

        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel(
                ard_num_dims=self.gp_input_dim,
                lengthscale_prior=self.lengthscale_prior,
                lengthscale_constraint=LessThan(100)),
            outputscale_prior=self.outputscale_prior)
Example #6
    def build(self):
        """
        Right now this isn't needed by this method.
        """
        def prod(iterable):
            return reduce(operator.mul, iterable)

        mass_kernel = RBFKernel(active_dims=1,
                                lengthscale_constraint=GreaterThan(10.))
        time_kernel = RBFKernel(active_dims=0,
                                lengthscale_constraint=GreaterThan(0.1))
        spin_kernels = [
            RBFKernel(active_dims=dimension,
                      lengthscale_constraint=GreaterThan(7))
            for dimension in range(2, 8)
        ]

        class ExactGPModel(gpytorch.models.ExactGP):
            """
            Use the GPyTorch Exact GP
            """
            def __init__(self, train_x, train_y, likelihood):
                """Initialise the model"""
                super(ExactGPModel, self).__init__(train_x, train_y,
                                                   likelihood)
                self.mean_module = gpytorch.means.ZeroMean()
                self.covar_module = gpytorch.kernels.ScaleKernel(
                    time_kernel * mass_kernel * prod(spin_kernels),
                    lengthscale_constraint=gpytorch.constraints.LessThan(0.01))

            def forward(self, x):
                """Run the forward method of the model"""
                mean_x = self.mean_module(x)
                covar_x = self.covar_module(x)
                return gpytorch.distributions.MultivariateNormal(
                    mean_x, covar_x)

        data = np.genfromtxt(
            pkg_resources.resource_filename('heron',
                                            'models/data/gt-M60-F1024.dat'))

        training_x = self.training_x = torch.tensor(data[:, 0:-2] *
                                                    100).float().cuda()
        training_y = self.training_y = torch.tensor(data[:, -2] *
                                                    1e21).float().cuda()
        training_yx = torch.tensor(data[:, -1] * 1e21).float().cuda()

        likelihood = gpytorch.likelihoods.GaussianLikelihood(
            noise_constraint=LessThan(10))
        model = ExactGPModel(training_x, training_y, likelihood)
        model2 = ExactGPModel(training_x, training_yx, likelihood)
        state_vector = pkg_resources.resource_filename(
            'heron', 'models/data/gt-gpytorch.pth')

        model = model.cuda()
        model2 = model2.cuda()
        likelihood = likelihood.cuda()

        model.load_state_dict(torch.load(state_vector))
        model2.load_state_dict(torch.load(state_vector))

        return [model, model2], likelihood
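
All of the examples above bound hyperparameters by passing objects from gpytorch.constraints to kernels or likelihoods. As a standalone illustration (a sketch with freely chosen bounds, not taken from any example above), the constraint acts on a raw parameter, so the value the kernel exposes always respects the bound while the optimizer works on the unconstrained raw tensor:

    import gpytorch
    from gpytorch.constraints import GreaterThan, LessThan

    # RBF lengthscale kept below 2.0, output scale kept above 0.5
    kernel = gpytorch.kernels.ScaleKernel(
        gpytorch.kernels.RBFKernel(lengthscale_constraint=LessThan(2.0)),
        outputscale_constraint=GreaterThan(0.5))

    # optimization acts on the unconstrained raw parameters ...
    print(kernel.base_kernel.raw_lengthscale)
    # ... while the properties return the transformed, constrained values
    print(kernel.base_kernel.lengthscale)  # always < 2.0
    print(kernel.outputscale)              # always > 0.5

    # setting a hyperparameter goes through the constraint's inverse transform
    kernel.base_kernel.lengthscale = 1.5
    kernel.outputscale = 1.0
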