Example #1
    def test_input_transform(self):
        # simple test making sure that the input transforms are applied to both
        # train and test inputs
        for dtype, transform_on_train in itertools.product(
            (torch.float, torch.double), (False, True)):
            tkwargs = {"device": self.device, "dtype": dtype}
            train_X = torch.rand(5, 1, **tkwargs)
            train_Y = torch.sin(train_X)
            intf = SimpleInputTransform(transform_on_train)
            model = SimpleGPyTorchModel(train_X, train_Y, input_transform=intf)
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            fit_gpytorch_model(mll, options={"maxiter": 2})

            test_X = torch.rand(2, 1, **tkwargs)
            model.posterior(test_X)
            # posterior calls model.forward twice: once with the training
            # inputs only, and once with both train and test inputs
            expected_train = intf(train_X) if transform_on_train else train_X
            expected_test = intf(test_X)
            self.assertTrue(
                torch.equal(model.transformed_call_args[-2], expected_train))
            self.assertTrue(
                torch.equal(
                    model.transformed_call_args[-1],
                    torch.cat([expected_train, expected_test], dim=0),
                ))
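Note: `SimpleInputTransform` and `SimpleGPyTorchModel` are helpers defined in the surrounding test module. A minimal sketch of what the transform could look like, assuming it applies a constant shift (the offset and the recorded `transformed_call_args` mechanism are assumptions, not BoTorch's published API):

import torch
from torch.nn import Module
from botorch.models.transforms.input import InputTransform

class SimpleInputTransform(InputTransform, Module):
    """Hypothetical stand-in: shift inputs by a constant offset."""

    def __init__(self, transform_on_train: bool) -> None:
        super().__init__()
        self.transform_on_train = transform_on_train
        self.transform_on_eval = True
        self.transform_on_fantasize = True

    def transform(self, X: torch.Tensor) -> torch.Tensor:
        # A constant shift makes transformed inputs easy to recognize in
        # the call args recorded by the companion test model.
        return X + 1.0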
Example #2
    def testFixedNoiseLCEMGP(self):
        d = 1
        for dtype in (torch.float, torch.double):
            train_x = torch.rand(10, d, device=self.device, dtype=dtype)
            train_y = torch.cos(train_x)
            task_indices = torch.tensor(
                [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                device=self.device, dtype=dtype)
            train_x = torch.cat([train_x, task_indices.unsqueeze(-1)], dim=1)
            train_yvar = torch.ones(10, 1, device=self.device,
                                    dtype=dtype) * 0.01

            model = FixedNoiseLCEMGP(train_X=train_x,
                                     train_Y=train_y,
                                     train_Yvar=train_yvar,
                                     task_feature=d)
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            fit_gpytorch_model(mll, options={"maxiter": 1})

            self.assertIsInstance(model, FixedNoiseLCEMGP)

            test_x = torch.rand(5, d, device=self.device, dtype=dtype)
            task_indices = torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0],
                                        device=self.device,
                                        dtype=dtype)
            test_x = torch.cat(
                [test_x, task_indices.unsqueeze(-1)],
                dim=1,
            )
            self.assertIsInstance(model(test_x), MultivariateNormal)
Example #3
def run():

	train_x, train_obj, train_con, best_observed_value_nei = generate_initial_data(n=10)

	# define models for objective and constraint
	model_obj = FixedNoiseGP(train_x, train_obj, train_yvar.expand_as(train_obj)).to(train_x)
	model_con = FixedNoiseGP(train_x, train_con, train_yvar.expand_as(train_con)).to(train_x)
	# combine into a multi-output GP model
	model = ModelListGP(model_obj, model_con)
	mll = SumMarginalLogLikelihood(model.likelihood, model)

	fit_gpytorch_model(mll)

	acqui_gpmean_cons = GPmeanConstrained(model=model, objective=constrained_obj)

	# Forward:
	# X = torch.rand(size=(1,6))
	# acqui_gpmean_cons.forward(X)

	method_opti = "SLSQP" # constraints
	# method_opti = "COBYLA" # constraints
	# method_opti = "L-BFGS-B"

	# Below, num_restarts must equal q; otherwise the call fails.
	options = {"batch_limit": 1, "maxiter": 200, "ftol": 1e-6, "method": method_opti}
	x_eta_c, eta_c = optimize_acqf(
		acq_function=acqui_gpmean_cons, bounds=bounds, q=1, num_restarts=1,
		raw_samples=500, return_best_only=True, options=options)
	pdb.set_trace()
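This script also references module-level names defined elsewhere in the project (`train_yvar`, `bounds`, `generate_initial_data`, `constrained_obj`, `GPmeanConstrained`). A minimal sketch of the data-related pieces, with the dimension, noise level, and placeholder objective/constraint as pure assumptions:

import torch

NOISE_SE = 0.1
train_yvar = torch.tensor(NOISE_SE ** 2)  # assumed homoskedastic noise level
bounds = torch.stack([torch.zeros(6), torch.ones(6)])  # assumed 6d unit cube

def generate_initial_data(n=10):
    # Placeholder objective/constraint evaluations on random inputs.
    train_x = torch.rand(n, 6)
    train_obj = train_x.norm(dim=-1, keepdim=True)       # stand-in objective
    train_con = train_x.sum(dim=-1, keepdim=True) - 3.0  # stand-in constraint (feasible if <= 0)
    feas = (train_con <= 0).squeeze(-1)
    best = train_obj[feas].max().item() if feas.any() else float("-inf")
    return train_x, train_obj, train_con, best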
Example #4
    def test_fit_gpytorch_model_singular(self):
        options = {"disp": False, "maxiter": 5}
        for dtype in (torch.float, torch.double):
            X_train = torch.ones(2, 2, device=self.device, dtype=dtype)
            Y_train = torch.zeros(2, 1, device=self.device, dtype=dtype)
            test_likelihood = GaussianLikelihood(
                noise_constraint=GreaterThan(-1e-7, transform=None, initial_value=0.0)
            )
            gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
            mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
            mll.to(device=self.device, dtype=dtype)
            # this will do multiple retries (and emit warnings, which is desired)
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                fit_gpytorch_model(mll, options=options, max_retries=2)
                self.assertTrue(
                    any(issubclass(w.category, NumericalWarning) for w in ws)
                )
            # ensure that we fail if the noise constraint guarantees that
            # jitter does not help
            gp.likelihood = GaussianLikelihood(
                noise_constraint=Interval(-2, -1, transform=None, initial_value=-1.5)
            )
            mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
            mll.to(device=self.device, dtype=dtype)
            with self.assertLogs(level="DEBUG") as logs:
                fit_gpytorch_model(mll, options=options, max_retries=2)
            self.assertTrue(any("NotPSDError" in log for log in logs.output))
            # ensure we can handle NaNErrors in the optimizer
            with mock.patch.object(SingleTaskGP, "__call__", side_effect=NanError):
                gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
                mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
                mll.to(device=self.device, dtype=dtype)
                fit_gpytorch_model(
                    mll, options={"disp": False, "maxiter": 1}, max_retries=1
                )
            # ensure we catch NotPSDErrors
            with mock.patch.object(SingleTaskGP, "__call__", side_effect=NotPSDError):
                mll = self._getModel()
                with self.assertLogs(level="DEBUG") as logs:
                    fit_gpytorch_model(mll, max_retries=2)
                for retry in [1, 2]:
                    self.assertTrue(
                        any(
                            f"Fitting failed on try {retry} due to a NotPSDError."
                            in log
                            for log in logs.output
                        )
                    )

            # Failure due to optimization warning

            def optimize_w_warning(mll, **kwargs):
                warnings.warn("Dummy warning.", OptimizationWarning)
                return mll, None

            mll = self._getModel()
            with self.assertLogs(level="DEBUG") as logs, settings.debug(True):
                fit_gpytorch_model(mll, optimizer=optimize_w_warning, max_retries=2)
            self.assertTrue(
                any("Fitting failed on try 1." in log for log in logs.output)
            )
Example #5
    def testLCEAGP(self):
        for dtype in (torch.float, torch.double):
            train_X = torch.tensor(
                [[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0],
                 [2.0, 2.0, 2.0, 2.0]],
                device=self.device,
                dtype=dtype,
            )
            train_Y = torch.tensor([[1.0], [2.0], [3.0]],
                                   device=self.device,
                                   dtype=dtype)
            train_Yvar = 0.01 * torch.ones(
                3, 1, device=self.device, dtype=dtype)
            # Test setting attributes
            decomposition = {"1": [0, 1], "2": [2, 3]}

            # test instantiate model
            model = LCEAGP(
                train_X=train_X,
                train_Y=train_Y,
                train_Yvar=train_Yvar,
                decomposition=decomposition,
            )
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            fit_gpytorch_model(mll, options={"maxiter": 1})

            self.assertIsInstance(model, LCEAGP)
            self.assertIsInstance(model.covar_module, LCEAKernel)
            self.assertDictEqual(model.decomposition, decomposition)

            test_x = torch.rand(5, 4, device=self.device, dtype=dtype)
            posterior = model(test_x)
            self.assertIsInstance(posterior, MultivariateNormal)
Example #6
 def test_fit_gpytorch_model_sequential(self):
     options = {"disp": False, "maxiter": 1}
     for double, kind, outcome_transform in product(
         (False, True),
         ("SingleTaskGP", "FixedNoiseGP", "HeteroskedasticSingleTaskGP"),
         (False, True),
     ):
         with warnings.catch_warnings(record=True) as ws, settings.debug(True):
             mll = self._getBatchedModel(
                 kind=kind, double=double, outcome_transform=outcome_transform
             )
             mll = fit_gpytorch_model(mll, options=options, max_retries=1)
             mll = self._getBatchedModel(
                 kind=kind, double=double, outcome_transform=outcome_transform
             )
             mll = fit_gpytorch_model(
                 mll, options=options, sequential=True, max_retries=1
             )
             mll = self._getBatchedModel(
                 kind=kind, double=double, outcome_transform=outcome_transform
             )
             mll = fit_gpytorch_model(
                 mll, options=options, sequential=False, max_retries=1
             )
             if kind == "HeteroskedasticSingleTaskGP":
                 self.assertTrue(
                     any(issubclass(w.category, BotorchWarning) for w in ws)
                 )
                 self.assertTrue(
                     any(
                         "Failed to convert ModelList to batched model"
                         in str(w.message)
                         for w in ws
                     )
                 )
Example #7
 def test_fit_gpytorch_model_sequential(self, cuda=False):
     options = {"disp": False, "maxiter": 1}
     for double in (False, True):
         for kind in ("SingleTaskGP", "FixedNoiseGP",
                      "HeteroskedasticSingleTaskGP"):
             with warnings.catch_warnings():
                 warnings.filterwarnings("ignore",
                                         category=OptimizationWarning)
                 mll = self._getBatchedModel(kind=kind,
                                             double=double,
                                             cuda=cuda)
                 mll = fit_gpytorch_model(mll,
                                          options=options,
                                          max_retries=1)
                 mll = self._getBatchedModel(kind=kind,
                                             double=double,
                                             cuda=cuda)
                 mll = fit_gpytorch_model(mll,
                                          options=options,
                                          sequential=True,
                                          max_retries=1)
                 mll = self._getBatchedModel(kind=kind,
                                             double=double,
                                             cuda=cuda)
                 mll = fit_gpytorch_model(mll,
                                          options=options,
                                          sequential=False,
                                          max_retries=1)
Example #8
    def test_SACGP(self):
        train_X = torch.tensor(
            [[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0]]
        )
        train_Y = torch.tensor([[1.0], [2.0], [3.0]])
        train_Yvar = 0.01 * torch.ones(3, 1, dtype=torch.double)
        self.decomposition = {"1": [0, 3], "2": [1, 2]}

        self.model = SACGP(train_X, train_Y, train_Yvar, self.decomposition)
        mll = ExactMarginalLogLikelihood(self.model.likelihood, self.model)
        fit_gpytorch_model(mll, options={"maxiter": 1})

        self.assertIsInstance(self.model, FixedNoiseGP)
        self.assertDictEqual(self.model.decomposition, self.decomposition)
        self.assertIsInstance(self.model.mean_module, ConstantMean)
        self.assertIsInstance(self.model.covar_module, SACKernel)

        # test number of named parameters
        num_of_mean = 0
        num_of_lengthscales = 0
        num_of_outputscales = 0
        for param_name, param in self.model.named_parameters():
            if param_name == "mean_module.constant":
                num_of_mean += param.data.shape.numel()
            elif "raw_lengthscale" in param_name:
                num_of_lengthscales += param.data.shape.numel()
            elif "raw_outputscale" in param_name:
                num_of_outputscales += param.data.shape.numel()
        self.assertEqual(num_of_mean, 1)
        self.assertEqual(num_of_lengthscales, 2)
        self.assertEqual(num_of_outputscales, 2)

        test_x = torch.rand(5, 4)
        posterior = self.model(test_x)
        self.assertIsInstance(posterior, MultivariateNormal)
Example #9
    def test_fit_gpytorch_model(self,
                                cuda=False,
                                optimizer=fit_gpytorch_scipy):
        options = {"disp": False, "maxiter": 5}
        for double in (False, True):
            mll = self._getModel(double=double, cuda=cuda)
            mll = fit_gpytorch_model(mll, optimizer=optimizer, options=options)
            model = mll.model
            # Make sure all of the parameters changed
            self.assertGreater(model.likelihood.raw_noise.abs().item(), 1e-3)
            self.assertLess(model.mean_module.constant.abs().item(), 0.1)
            self.assertGreater(
                model.covar_module.base_kernel.raw_lengthscale.abs().item(),
                0.1)
            self.assertGreater(model.covar_module.raw_outputscale.abs().item(),
                               1e-3)

            # test overriding the default bounds with user supplied bounds
            mll = self._getModel(double=double, cuda=cuda)
            mll = fit_gpytorch_model(
                mll,
                optimizer=optimizer,
                options=options,
                bounds={"likelihood.noise_covar.raw_noise": (1e-1, None)},
            )
            model = mll.model
            self.assertGreaterEqual(model.likelihood.raw_noise.abs().item(),
                                    1e-1)
            self.assertLess(model.mean_module.constant.abs().item(), 0.1)
            self.assertGreater(
                model.covar_module.base_kernel.raw_lengthscale.abs().item(),
                0.1)
            self.assertGreater(model.covar_module.raw_outputscale.abs().item(),
                               1e-3)

            # test tracking iterations
            mll = self._getModel(double=double, cuda=cuda)
            if optimizer is fit_gpytorch_torch:
                options["disp"] = True
            mll, iterations = optimizer(mll,
                                        options=options,
                                        track_iterations=True)
            self.assertEqual(len(iterations), options["maxiter"])
            self.assertIsInstance(iterations[0], OptimizationIteration)

            # test extra param that does not affect loss
            options["disp"] = False
            mll = self._getModel(double=double, cuda=cuda)
            mll.register_parameter(
                "dummy_param",
                torch.nn.Parameter(
                    torch.tensor(
                        [5.0],
                        dtype=torch.double if double else torch.float,
                        device=torch.device("cuda" if cuda else "cpu"),
                    )),
            )
            mll = fit_gpytorch_model(mll, optimizer=optimizer, options=options)
            self.assertTrue(mll.dummy_param.grad is None)
Example #10
    def test_gp(self, cuda=False):
        for batch_shape in (torch.Size([]), torch.Size([2])):
            for num_outputs in (1, 2):
                for double in (False, True):
                    tkwargs = {
                        "device": torch.device("cuda" if cuda else "cpu"),
                        "dtype": torch.double if double else torch.float,
                    }
                    model, _ = self._get_model_and_data(
                        batch_shape=batch_shape,
                        num_outputs=num_outputs,
                        **tkwargs)
                    mll = ExactMarginalLogLikelihood(model.likelihood,
                                                     model).to(**tkwargs)
                    fit_gpytorch_model(mll, options={"maxiter": 1})
                    # test init
                    self.assertIsInstance(model.mean_module, ConstantMean)
                    self.assertIsInstance(model.covar_module, ScaleKernel)
                    matern_kernel = model.covar_module.base_kernel
                    self.assertIsInstance(matern_kernel, MaternKernel)
                    self.assertIsInstance(matern_kernel.lengthscale_prior,
                                          GammaPrior)

                    # Test forward
                    test_x = torch.rand(batch_shape + torch.Size([3, 1]),
                                        **tkwargs)
                    posterior = model(test_x)
                    self.assertIsInstance(posterior, MultivariateNormal)

                    # test param sizes
                    params = dict(model.named_parameters())
                    for p in params:
                        self.assertEqual(
                            params[p].numel(),
                            num_outputs *
                            torch.tensor(batch_shape).prod().item(),
                        )

                    # test posterior
                    # test non batch evaluation
                    X = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
                    posterior = model.posterior(X)
                    self.assertIsInstance(posterior, GPyTorchPosterior)
                    self.assertEqual(
                        posterior.mean.shape,
                        batch_shape + torch.Size([3, num_outputs]))
                    # test batch evaluation
                    X = torch.rand(
                        torch.Size([2]) + batch_shape + torch.Size([3, 1]),
                        **tkwargs)
                    posterior = model.posterior(X)
                    self.assertIsInstance(posterior, GPyTorchPosterior)
                    self.assertEqual(
                        posterior.mean.shape,
                        torch.Size([2]) + batch_shape +
                        torch.Size([3, num_outputs]),
                    )
Example #11
    def test_gp(self):
        for (iteration_fidelity, data_fidelity) in self.FIDELITY_TEST_PAIRS:
            num_dim = (
                1 + (iteration_fidelity is not None) + (data_fidelity is not None)
            )
            for batch_shape, num_outputs, dtype, lin_trunc in itertools.product(
                (torch.Size(), torch.Size([2])),
                (1, 2),
                (torch.float, torch.double),
                (False, True),
            ):
                tkwargs = {"device": self.device, "dtype": dtype}
                model, _ = _get_model_and_data(
                    iteration_fidelity=iteration_fidelity,
                    data_fidelity=data_fidelity,
                    batch_shape=batch_shape,
                    num_outputs=num_outputs,
                    lin_truncated=lin_trunc,
                    **tkwargs,
                )
                mll = ExactMarginalLogLikelihood(model.likelihood, model)
                mll.to(**tkwargs)
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore",
                                            category=OptimizationWarning)
                    fit_gpytorch_model(mll,
                                       sequential=False,
                                       options={"maxiter": 1})

                # test init
                self.assertIsInstance(model.mean_module, ConstantMean)
                self.assertIsInstance(model.covar_module, ScaleKernel)

                # test param sizes
                params = dict(model.named_parameters())
                for p in params:
                    self.assertEqual(
                        params[p].numel(),
                        num_outputs * torch.tensor(batch_shape).prod().item(),
                    )

                # test posterior
                # test non batch evaluation
                X = torch.rand(batch_shape + torch.Size([3, num_dim]),
                               **tkwargs)
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(posterior.mean.shape,
                                 batch_shape + torch.Size([3, num_outputs]))
                # test batch evaluation
                X = torch.rand(
                    torch.Size([2]) + batch_shape + torch.Size([3, num_dim]),
                    **tkwargs)
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(
                    posterior.mean.shape,
                    torch.Size([2]) + batch_shape +
                    torch.Size([3, num_outputs]),
                )
Example #12
 def test_fit_w_maxiter(self):
     options = {"maxiter": 1}
     with warnings.catch_warnings(record=True) as ws, settings.debug(True):
         mll = self._getModel()
         fit_gpytorch_model(mll, options=options, max_retries=3)
         mll = self._getBatchedModel()
         fit_gpytorch_model(mll, options=options, max_retries=3)
     self.assertFalse(any("ITERATIONS REACHED LIMIT" in str(w.message) for w in ws))
Example #13
 def _get_model(self, batch_shape, num_outputs, likelihood=None, **tkwargs):
     train_x, train_y = _get_random_data(
         batch_shape=batch_shape, num_outputs=num_outputs, **tkwargs
     )
     model = SingleTaskGP(train_X=train_x, train_Y=train_y, likelihood=likelihood)
     mll = ExactMarginalLogLikelihood(model.likelihood, model).to(**tkwargs)
     fit_gpytorch_model(mll, options={"maxiter": 1})
     return model
Example #14
    def fit_model(self):
        """
        If no state_dict exists, fits the model and saves the state_dict.
        Otherwise, constructs the model but uses the fit given by the state_dict.
        """
        # read the data
        data_list = list()
        for i in range(1, 31):
            data_file = os.path.join(script_dir, "port_evals",
                                     "port_n=100_seed=%d" % i)
            data_list.append(torch.load(data_file))

        # join the data together
        X = torch.cat([data_list[i]["X"] for i in range(len(data_list))],
                      dim=0).squeeze(-2)
        Y = torch.cat([data_list[i]["Y"] for i in range(len(data_list))],
                      dim=0).squeeze(-2)

        # fit GP
        noise_prior = GammaPrior(1.1, 0.5)
        noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate
        likelihood = GaussianLikelihood(
            noise_prior=noise_prior,
            batch_shape=[],
            noise_constraint=GreaterThan(
                0.000005,  # minimum observation noise assumed in the GP model
                transform=None,
                initial_value=noise_prior_mode,
            ),
        )

        # We save the state dict to avoid fitting the GP every time, which takes ~3 mins
        try:
            state_dict = torch.load(
                os.path.join(script_dir, "portfolio_surrogate_state_dict.pt"))
            model = SingleTaskGP(X,
                                 Y,
                                 likelihood,
                                 outcome_transform=Standardize(m=1))
            model.load_state_dict(state_dict)
        except FileNotFoundError:
            model = SingleTaskGP(X,
                                 Y,
                                 likelihood,
                                 outcome_transform=Standardize(m=1))
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            from time import time

            start = time()
            fit_gpytorch_model(mll)
            print("fitting took %s seconds" % (time() - start))
            torch.save(
                model.state_dict(),
                os.path.join(script_dir, "portfolio_surrogate_state_dict.pt"),
            )
        self.model = model
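The try/except above is a simple cache-or-fit pattern; distilled into a standalone helper (the file name is an assumption):

import torch
from botorch.fit import fit_gpytorch_model

def fit_or_load(mll, path="gp_state_dict.pt"):
    """Load cached hyperparameters if present; otherwise fit and cache them."""
    model = mll.model
    try:
        model.load_state_dict(torch.load(path))
    except FileNotFoundError:
        fit_gpytorch_model(mll)
        torch.save(model.state_dict(), path)
    return model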
Example #15
 def _get_model(self, batch_shape, num_outputs, likelihood=None, **tkwargs):
     train_x, train_y = _get_random_data(batch_shape=batch_shape,
                                         num_outputs=num_outputs,
                                         **tkwargs)
     model = SingleTaskGP(train_X=train_x,
                          train_Y=train_y,
                          likelihood=likelihood)
     mll = ExactMarginalLogLikelihood(model.likelihood, model).to(**tkwargs)
     fit_gpytorch_model(mll, options={"maxiter": 1})
     return model
Example #16
 def _get_model(self, batch_shape, num_outputs, **tkwargs):
     train_x, train_y = _get_random_data(batch_shape=batch_shape,
                                         num_outputs=num_outputs,
                                         **tkwargs)
     train_yvar = (0.1 + 0.1 * torch.rand_like(train_y))**2
     model = HeteroskedasticSingleTaskGP(train_X=train_x,
                                         train_Y=train_y,
                                         train_Yvar=train_yvar)
     mll = ExactMarginalLogLikelihood(model.likelihood, model).to(**tkwargs)
     fit_gpytorch_model(mll, options={"maxiter": 1})
     return model
Example #17
 def _get_model(self, batch_shape, num_outputs, **tkwargs):
     train_x, train_y = _get_random_data(
         batch_shape=batch_shape, num_outputs=num_outputs, **tkwargs
     )
     train_yvar = (0.1 + 0.1 * torch.rand_like(train_y)) ** 2
     model = HeteroskedasticSingleTaskGP(
         train_X=train_x, train_Y=train_y, train_Yvar=train_yvar
     )
     mll = ExactMarginalLogLikelihood(model.likelihood, model).to(**tkwargs)
     fit_gpytorch_model(mll, options={"maxiter": 1})
     return model
Example #18
    def test_fit_gpytorch_model(self, cuda=False, optimizer=fit_gpytorch_scipy):
        options = {"disp": False, "maxiter": 5}
        for double in (False, True):
            mll = self._getModel(double=double, cuda=cuda)
            mll = fit_gpytorch_model(mll, optimizer=optimizer, options=options)
            model = mll.model
            # Make sure all of the parameters changed
            self.assertGreater(model.likelihood.raw_noise.abs().item(), 1e-3)
            self.assertLess(model.mean_module.constant.abs().item(), 0.1)
            self.assertGreater(
                model.covar_module.base_kernel.raw_lengthscale.abs().item(), 0.1
            )
            self.assertGreater(model.covar_module.raw_outputscale.abs().item(), 1e-3)

            # test overriding the default bounds with user supplied bounds
            mll = self._getModel(double=double, cuda=cuda)
            mll = fit_gpytorch_model(
                mll,
                optimizer=optimizer,
                options=options,
                bounds={"likelihood.noise_covar.raw_noise": (1e-1, None)},
            )
            model = mll.model
            self.assertGreaterEqual(model.likelihood.raw_noise.abs().item(), 1e-1)
            self.assertLess(model.mean_module.constant.abs().item(), 0.1)
            self.assertGreater(
                model.covar_module.base_kernel.raw_lengthscale.abs().item(), 0.1
            )
            self.assertGreater(model.covar_module.raw_outputscale.abs().item(), 1e-3)

            # test tracking iterations
            mll = self._getModel(double=double, cuda=cuda)
            if optimizer is fit_gpytorch_torch:
                options["disp"] = True
            mll, iterations = optimizer(mll, options=options, track_iterations=True)
            self.assertEqual(len(iterations), options["maxiter"])
            self.assertIsInstance(iterations[0], OptimizationIteration)

            # test extra param that does not affect loss
            options["disp"] = False
            mll = self._getModel(double=double, cuda=cuda)
            mll.register_parameter(
                "dummy_param",
                torch.nn.Parameter(
                    torch.tensor(
                        [5.0],
                        dtype=torch.double if double else torch.float,
                        device=torch.device("cuda" if cuda else "cpu"),
                    )
                ),
            )
            mll = fit_gpytorch_model(mll, optimizer=optimizer, options=options)
            self.assertTrue(mll.dummy_param.grad is None)
Example #19
 def test_set_transformed_inputs(self):
     # This is intended to catch https://github.com/pytorch/botorch/issues/1078.
     # More general testing of _set_transformed_inputs is done under ModelListGP.
     X = torch.rand(5, 2)
     Y = X**2
     for tf_class in [Normalize, InputStandardize]:
         intf = tf_class(d=2)
         model = SingleTaskGP(X, Y, input_transform=intf)
         mll = ExactMarginalLogLikelihood(model.likelihood, model)
         fit_gpytorch_model(mll, options={"maxiter": 2})
         tf_X = intf(X)
         self.assertEqual(X.shape, tf_X.shape)
Example #20
 def test_fit_gpytorch_model_singular(self, cuda=False):
     options = {"disp": False, "maxiter": 5}
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         X_train = torch.rand(2, 2, device=device, dtype=dtype)
         Y_train = torch.zeros(2, 1, device=device, dtype=dtype)
         test_likelihood = GaussianLikelihood(noise_constraint=GreaterThan(
             -1.0, transform=None, initial_value=0.0))
         gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=device, dtype=dtype)
         # this will do multiple retries (and emit warnings, which is desired)
         fit_gpytorch_model(mll, options=options, max_retries=2)
Example #21
    def _sample(self, candidates: Optional[np.ndarray] = None) -> np.ndarray:
        if len(self.X_observed) < self.num_initial_random_draws:
            return self.initial_sampler.sample(candidates=candidates)
        else:
            z_observed = torch.Tensor(self.transform_outputs(self.y_observed.numpy()))

            with torch.no_grad():
                # both (n, 1)
                #mu_pred, sigma_pred = self.thompson_sampling.prior(self.X_observed)
                mu_pred, sigma_pred = self.initial_sampler.prior.predict(self.X_observed)
                mu_pred = torch.Tensor(mu_pred)
                sigma_pred = torch.Tensor(sigma_pred)

            # (n, 1)
            r_observed = residual_transform(z_observed, mu_pred, sigma_pred)

            # build and fit GP on residuals
            gp = SingleTaskGP(
                train_X=self.X_observed,
                train_Y=r_observed,
                likelihood=GaussianLikelihood(noise_constraint=GreaterThan(1e-3)),
            )
            mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
            fit_gpytorch_model(mll)

            acq = ShiftedExpectedImprovement(
                model=gp,
                best_f=z_observed.min(dim=0).values,
                mean_std_predictor=self.initial_sampler.prior.predict,
                maximize=False,
            )

            if candidates is None:
                candidate, acq_value = optimize_acqf(
                    acq,
                    bounds=self.bounds_tensor,
                    q=1,
                    num_restarts=5,
                    raw_samples=100,
                )
                # import matplotlib.pyplot as plt
                # x = torch.linspace(-1, 1).unsqueeze(dim=-1)
                # x = torch.cat((x, x * 0), dim=1)
                # plt.plot(x[:, 0].flatten().tolist(), acq(x.unsqueeze(dim=1)).tolist())
                # plt.show()
                return candidate[0]
            else:
                # (N,)
                ei = acq(torch.Tensor(candidates).unsqueeze(dim=-2))
                return torch.Tensor(candidates[ei.argmax()])
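`residual_transform` and `ShiftedExpectedImprovement` come from the surrounding project. Since the GP is fit on `r_observed`, a plausible definition of the residual transform is a standardization against the prior predictor (this is an assumption, not the project's code):

import torch

def residual_transform(z: torch.Tensor, mu: torch.Tensor, sigma: torch.Tensor) -> torch.Tensor:
    # Standardized residuals of observed outcomes w.r.t. the prior
    # mean/std predictor; the clamp guards against a near-zero std.
    return (z - mu) / sigma.clamp_min(1e-12)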
Example #22
    def test_FixedNoiseMultiTaskGP_single_output(self, cuda=False):
        for double in (False, True):
            tkwargs = {
                "device": torch.device("cuda") if cuda else torch.device("cpu"),
                "dtype": torch.double if double else torch.float,
            }
            model = _get_fixed_noise_model_single_output(**tkwargs)
            self.assertIsInstance(model, FixedNoiseMultiTaskGP)
            self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            matern_kernel = model.covar_module.base_kernel
            self.assertIsInstance(matern_kernel, MaternKernel)
            self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.task_covar_module, IndexKernel)
            self.assertEqual(model._rank, 2)
            self.assertEqual(
                model.task_covar_module.covar_factor.shape[-1], model._rank
            )

            # test model fitting
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            mll = fit_gpytorch_model(mll, options={"maxiter": 1})

            # test posterior
            test_x = torch.rand(2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)

            # test posterior (batch eval)
            test_x = torch.rand(3, 2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)
Example #23
    def test_FixedNoiseGP(self, cuda=False):
        for batch_shape in (torch.Size([]), torch.Size([2])):
            for num_outputs in (1, 2):
                for double in (False, True):
                    tkwargs = {
                        "device": torch.device("cuda") if cuda else torch.device("cpu"),
                        "dtype": torch.double if double else torch.float,
                    }
                    model = self._get_model(
                        batch_shape=batch_shape,
                        num_outputs=num_outputs,
                        n=10,
                        **tkwargs
                    )
                    self.assertIsInstance(model, FixedNoiseGP)
                    self.assertIsInstance(
                        model.likelihood, FixedNoiseGaussianLikelihood
                    )
                    self.assertIsInstance(model.mean_module, ConstantMean)
                    self.assertIsInstance(model.covar_module, ScaleKernel)
                    matern_kernel = model.covar_module.base_kernel
                    self.assertIsInstance(matern_kernel, MaternKernel)
                    self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)

                    # test model fitting
                    mll = ExactMarginalLogLikelihood(model.likelihood, model)
                    mll = fit_gpytorch_model(mll, options={"maxiter": 1})

                    # Test forward
                    test_x = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
                    posterior = model(test_x)
                    self.assertIsInstance(posterior, MultivariateNormal)

                    # TODO: Pass observation noise into posterior
                    # posterior_obs = model.posterior(test_x, observation_noise=True)
                    # self.assertTrue(
                    #     torch.allclose(
                    #         posterior_f.variance + 0.01,
                    #         posterior_obs.variance
                    #     )
                    # )

                    # test posterior
                    # test non batch evaluation
                    X = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
                    posterior = model.posterior(X)
                    self.assertIsInstance(posterior, GPyTorchPosterior)
                    self.assertEqual(
                        posterior.mean.shape, batch_shape + torch.Size([3, num_outputs])
                    )
                    # test batch evaluation
                    X = torch.rand(
                        torch.Size([2]) + batch_shape + torch.Size([3, 1]), **tkwargs
                    )
                    posterior = model.posterior(X)
                    self.assertIsInstance(posterior, GPyTorchPosterior)
                    self.assertEqual(
                        posterior.mean.shape,
                        torch.Size([2]) + batch_shape + torch.Size([3, num_outputs]),
                    )
Example #24
    def test_MultiTaskGP_single_output(self, cuda=False):
        for double in (False, True):
            tkwargs = {
                "device": torch.device("cuda" if cuda else "cpu"),
                "dtype": torch.double if double else torch.float,
            }
            model = _get_model_single_output(**tkwargs)
            self.assertIsInstance(model, MultiTaskGP)
            self.assertIsInstance(model.likelihood, GaussianLikelihood)
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            matern_kernel = model.covar_module.base_kernel
            self.assertIsInstance(matern_kernel, MaternKernel)
            self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.task_covar_module, IndexKernel)
            self.assertEqual(model._rank, 2)
            self.assertEqual(model.task_covar_module.covar_factor.shape[-1],
                             model._rank)

            # test model fitting
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            mll = fit_gpytorch_model(mll, options={"maxiter": 1})

            # test posterior
            test_x = torch.rand(2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)

            # test posterior (batch eval)
            test_x = torch.rand(3, 2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)
Example #25
def get_fitted_model(train_x, train_obj, state_dict=None):
    # initialize and fit model
    model = SingleTaskGP(train_X=train_x, train_Y=train_obj)

    # # initialize likelihood and model
    # likelihood = gpytorch.likelihoods.GaussianLikelihood()
    # model = ExactGPModel(train_x, train_obj, likelihood)
    # model.train()
    # likelihood.train()

    if state_dict is not None:
        model.load_state_dict(state_dict)
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    mll.to(train_x)
    fit_gpytorch_model(mll)

    return model
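Usage sketch with synthetic data: passing the previous `state_dict` warm-starts refitting as observations accumulate.

import torch

train_x = torch.rand(20, 3)
train_obj = train_x.sum(dim=-1, keepdim=True)
model = get_fitted_model(train_x, train_obj)

# later, refit on the augmented data, warm-started from the old fit
new_x = torch.cat([train_x, torch.rand(1, 3)], dim=0)
new_obj = new_x.sum(dim=-1, keepdim=True)
model = get_fitted_model(new_x, new_obj, state_dict=model.state_dict())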
Example #26
 def test_fit_gpytorch_model_singular(self):
     options = {"disp": False, "maxiter": 5}
     for dtype in (torch.float, torch.double):
         X_train = torch.rand(2, 2, device=self.device, dtype=dtype)
         Y_train = torch.zeros(2, 1, device=self.device, dtype=dtype)
         test_likelihood = GaussianLikelihood(
             noise_constraint=GreaterThan(-1.0, transform=None, initial_value=0.0)
         )
         gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=self.device, dtype=dtype)
         # this will do multiple retries (and emit warnings, which is desired)
         with warnings.catch_warnings(record=True) as ws, settings.debug(True):
             fit_gpytorch_model(mll, options=options, max_retries=2)
             self.assertTrue(
                 any(issubclass(w.category, OptimizationWarning) for w in ws)
             )
Example #27
 def test_fit_gpytorch_model_singular(self):
     options = {"disp": False, "maxiter": 5}
     for dtype in (torch.float, torch.double):
         X_train = torch.ones(2, 2, device=self.device, dtype=dtype)
         Y_train = torch.zeros(2, 1, device=self.device, dtype=dtype)
         test_likelihood = GaussianLikelihood(
             noise_constraint=GreaterThan(-1e-7, transform=None, initial_value=0.0)
         )
         gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=self.device, dtype=dtype)
         # this will do multiple retries (and emit warnings, which is desired)
         with warnings.catch_warnings(record=True) as ws, settings.debug(True):
             fit_gpytorch_model(mll, options=options, max_retries=2)
             self.assertTrue(
                 any(issubclass(w.category, NumericalWarning) for w in ws)
             )
         # ensure that we fail if noise ensures that jitter does not help
         gp.likelihood = GaussianLikelihood(
             noise_constraint=Interval(-2, -1, transform=None, initial_value=-1.5)
         )
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=self.device, dtype=dtype)
         with self.assertRaises(NotPSDError):
             fit_gpytorch_model(mll, options=options, max_retries=2)
         # ensure we can handle NaNErrors in the optimizer
         with mock.patch.object(SingleTaskGP, "__call__", side_effect=NanError):
             gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
             mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
             mll.to(device=self.device, dtype=dtype)
             fit_gpytorch_model(
                 mll, options={"disp": False, "maxiter": 1}, max_retries=1
             )
Example #28
File: main.py  Project: stys/albo
def main():
    function = GardnerTestFunction()

    for j in range(25):

        x, y = generate_initial_data()
        for i in range(50):
            mll, model = initialize_model(x, y)
            fit_gpytorch_model(mll)

            augmented_objective = ClassicAugmentedLagrangianMCObjective(
                objective=lambda y: y[..., 0],
                constraints=[lambda y: y[..., 1]])

            x_new = fit_augmented_objective(model, augmented_objective, x, y)
            y_new = function(x_new)

            x = torch.cat([x, x_new], dim=0)
            y = torch.cat([y, y_new], dim=0)

        np.save(f'results/x_{j}.bin', x)
        np.save(f'results/y_{j}.bin', y)
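`initialize_model` is defined elsewhere in this project. Given that the objective and constraint lambdas index `y[..., 0]` and `y[..., 1]`, a plausible two-output version might be (the outcome standardization is an assumption):

from botorch.models import SingleTaskGP
from botorch.models.transforms.outcome import Standardize
from gpytorch.mlls import ExactMarginalLogLikelihood

def initialize_model(x, y):
    # One multi-output GP over the [objective, constraint] columns of y.
    model = SingleTaskGP(x, y, outcome_transform=Standardize(m=y.shape[-1]))
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    return mll, model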
Example #29
def generate_random_gp(dim: int, num_train: int, standardized: bool = True):
    r"""
    Returns a fitted gp trained on random input. Useful for testing purposes.

    Args:
        dim: Input dimension
        num_train: Number of training points
        standardized: If True, the train outcomes are standardized

    Returns:
        A fitted SingleTaskGP model
    """
    if standardized:
        transform = Standardize(m=1)
    else:
        transform = None
    train_X = torch.rand(num_train, dim)
    train_Y = torch.rand(num_train, 1)
    model = SingleTaskGP(train_X, train_Y, outcome_transform=transform)
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    fit_gpytorch_model(mll)
    return model
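A quick usage example of the helper above:

import torch

model = generate_random_gp(dim=3, num_train=16)
posterior = model.posterior(torch.rand(5, 3))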
Example #30
    def testLCEAGP(self):
        train_X = torch.tensor(
            [[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0]]
        )
        train_Y = torch.tensor([[1.0], [2.0], [3.0]])
        train_Yvar = 0.01 * torch.ones(3, 1, dtype=torch.double)
        # Test setting attributes
        decomposition = {"1": [0, 1], "2": [2, 3]}

        # test instantiate model
        model = LCEAGP(
            train_X=train_X,
            train_Y=train_Y,
            train_Yvar=train_Yvar,
            decomposition=decomposition,
        )
        mll = ExactMarginalLogLikelihood(model.likelihood, model)
        fit_gpytorch_model(mll, options={"maxiter": 1})

        self.assertIsInstance(model, LCEAGP)
        self.assertIsInstance(model.covar_module, LCEAKernel)
        self.assertDictEqual(model.decomposition, decomposition)
Example #31
    def fit(self, train_x_, train_y_):
        """
        Fit the Gaussian Process to training data by maximizing
        the marginal log likelihood (refits the model hyperparameters).

        Code based on the following GPyTorch tutorial:
        https://gpytorch.readthedocs.io/en/latest/examples/01_Exact_GPs/Simple_GP_Regression.html#Training-the-model

        :param train_x_: torch.Tensor (n, d)
        :param train_y_: torch.Tensor (n, 1)
        """

        train_X = train_x_.float()
        train_Y = train_y_.float()

        # Update self.train_x and self.train_y
        self.set_train_data(inputs=train_X, targets=train_Y)

        # "Loss" for GPs - the marginal log likelihood
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self)
        mll = mll.to(train_X)

        fit_gpytorch_model(mll)
Example #32
    def _sample(self, candidates: Optional[np.ndarray] = None) -> np.ndarray:
        if len(self.X_observed) < self.num_initial_random_draws:
            return self.initial_sampler.sample(candidates=candidates)
        else:
            z_observed = torch.Tensor(
                self.transform_outputs(self.y_observed.numpy()))

            # build and fit GP
            gp = SingleTaskGP(
                train_X=self.X_observed,
                train_Y=z_observed,
                # special likelihood for numerical Cholesky errors, following advice from
                # https://www.gitmemory.com/issue/pytorch/botorch/179/506276521
                likelihood=GaussianLikelihood(
                    noise_constraint=GreaterThan(1e-3)),
            )
            mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
            fit_gpytorch_model(mll)

            acq = self.expected_improvement(
                model=gp,
                best_f=z_observed.min(dim=0).values,
            )

            if candidates is None:
                candidate, acq_value = optimize_acqf(
                    acq,
                    bounds=self.bounds_tensor,
                    q=1,
                    num_restarts=5,
                    raw_samples=100,
                )
                return candidate[0]
            else:
                # (N,)
                ei = acq(torch.Tensor(candidates).unsqueeze(dim=-2))
                return torch.Tensor(candidates[ei.argmax()])
Example #33
    def test_ModelListGP(self, cuda=False):
        for double in (False, True):
            tkwargs = {
                "device": torch.device("cuda" if cuda else "cpu"),
                "dtype": torch.double if double else torch.float,
            }
            model = _get_model(n=10, **tkwargs)
            self.assertIsInstance(model, ModelListGP)
            self.assertIsInstance(model.likelihood, LikelihoodList)
            for m in model.models:
                self.assertIsInstance(m.mean_module, ConstantMean)
                self.assertIsInstance(m.covar_module, ScaleKernel)
                matern_kernel = m.covar_module.base_kernel
                self.assertIsInstance(matern_kernel, MaternKernel)
                self.assertIsInstance(matern_kernel.lengthscale_prior,
                                      GammaPrior)

            # test model fitting
            mll = SumMarginalLogLikelihood(model.likelihood, model)
            for mll_ in mll.mlls:
                self.assertIsInstance(mll_, ExactMarginalLogLikelihood)
            mll = fit_gpytorch_model(mll, options={"maxiter": 1})

            # test posterior
            test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
            posterior = model.posterior(test_x)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)

            # test observation_noise
            posterior = model.posterior(test_x, observation_noise=True)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)

            # test output_indices
            posterior = model.posterior(test_x,
                                        output_indices=[0],
                                        observation_noise=True)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultivariateNormal)
Example #34
    def test_ModelListGP(self, cuda=False):
        for double in (False, True):
            tkwargs = {
                "device": torch.device("cuda") if cuda else torch.device("cpu"),
                "dtype": torch.double if double else torch.float,
            }
            model = _get_model(n=10, **tkwargs)
            self.assertIsInstance(model, ModelListGP)
            self.assertIsInstance(model.likelihood, LikelihoodList)
            for m in model.models:
                self.assertIsInstance(m.mean_module, ConstantMean)
                self.assertIsInstance(m.covar_module, ScaleKernel)
                matern_kernel = m.covar_module.base_kernel
                self.assertIsInstance(matern_kernel, MaternKernel)
                self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)

            # test model fitting
            mll = SumMarginalLogLikelihood(model.likelihood, model)
            for mll_ in mll.mlls:
                self.assertIsInstance(mll_, ExactMarginalLogLikelihood)
            mll = fit_gpytorch_model(mll, options={"maxiter": 1})

            # test posterior
            test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
            posterior = model.posterior(test_x)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)

            # test observation_noise
            posterior = model.posterior(test_x, observation_noise=True)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)

            # test output_indices
            posterior = model.posterior(
                test_x, output_indices=[0], observation_noise=True
            )
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultivariateNormal)
Example #35
    def test_fit_gpytorch_model(self, optimizer=fit_gpytorch_scipy):
        options = {"disp": False, "maxiter": 5}
        for double in (False, True):
            mll = self._getModel(double=double)
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                mll = fit_gpytorch_model(
                    mll, optimizer=optimizer, options=options, max_retries=1
                )
                if optimizer == fit_gpytorch_scipy:
                    self.assertTrue(
                        any(issubclass(w.category, OptimizationWarning) for w in ws)
                    )
                    self.assertEqual(
                        sum(1 for w in ws if MAX_RETRY_MSG in str(w.message)), 1
                    )
            model = mll.model
            # Make sure all of the parameters changed
            self.assertGreater(model.likelihood.raw_noise.abs().item(), 1e-3)
            self.assertLess(model.mean_module.constant.abs().item(), 0.1)
            self.assertGreater(
                model.covar_module.base_kernel.raw_lengthscale.abs().item(), 0.1
            )
            self.assertGreater(model.covar_module.raw_outputscale.abs().item(), 1e-3)

            # test overriding the default bounds with user supplied bounds
            mll = self._getModel(double=double)
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                mll = fit_gpytorch_model(
                    mll,
                    optimizer=optimizer,
                    options=options,
                    max_retries=1,
                    bounds={"likelihood.noise_covar.raw_noise": (1e-1, None)},
                )
                if optimizer == fit_gpytorch_scipy:
                    self.assertTrue(
                        any(issubclass(w.category, OptimizationWarning) for w in ws)
                    )
                    self.assertEqual(
                        sum(1 for w in ws if MAX_RETRY_MSG in str(w.message)), 1
                    )

            model = mll.model
            self.assertGreaterEqual(model.likelihood.raw_noise.abs().item(), 1e-1)
            self.assertLess(model.mean_module.constant.abs().item(), 0.1)
            self.assertGreater(
                model.covar_module.base_kernel.raw_lengthscale.abs().item(), 0.1
            )
            self.assertGreater(model.covar_module.raw_outputscale.abs().item(), 1e-3)

            # test tracking iterations
            mll = self._getModel(double=double)
            if optimizer is fit_gpytorch_torch:
                options["disp"] = True
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                mll, info_dict = optimizer(mll, options=options, track_iterations=True)
                if optimizer == fit_gpytorch_scipy:
                    self.assertEqual(
                        sum(1 for w in ws if MAX_ITER_MSG in str(w.message)), 1
                    )
            self.assertEqual(len(info_dict["iterations"]), options["maxiter"])
            self.assertIsInstance(info_dict["iterations"][0], OptimizationIteration)
            self.assertTrue("fopt" in info_dict)
            self.assertTrue("wall_time" in info_dict)

            # Test different optimizer, for scipy optimizer,
            # because of different scipy OptimizeResult.message type
            if optimizer == fit_gpytorch_scipy:
                with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                    mll, info_dict = optimizer(
                        mll, options=options, track_iterations=False, method="slsqp"
                    )
                self.assertGreaterEqual(len(ws), 1)
                self.assertEqual(len(info_dict["iterations"]), 0)
                self.assertTrue("fopt" in info_dict)
                self.assertTrue("wall_time" in info_dict)

            # test extra param that does not affect loss
            options["disp"] = False
            mll = self._getModel(double=double)
            mll.register_parameter(
                "dummy_param",
                torch.nn.Parameter(
                    torch.tensor(
                        [5.0],
                        dtype=torch.double if double else torch.float,
                        device=self.device,
                    )
                ),
            )
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                mll = fit_gpytorch_model(
                    mll, optimizer=optimizer, options=options, max_retries=1
                )
                if optimizer == fit_gpytorch_scipy:
                    self.assertEqual(
                        sum(1 for w in ws if MAX_RETRY_MSG in str(w.message)), 1
                    )
            self.assertTrue(mll.dummy_param.grad is None)

            # test excluding a parameter
            mll = self._getModel(double=double)
            original_raw_noise = mll.model.likelihood.noise_covar.raw_noise.item()
            original_mean_module_constant = mll.model.mean_module.constant.item()
            options["exclude"] = [
                "model.mean_module.constant",
                "likelihood.noise_covar.raw_noise",
            ]
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                mll = fit_gpytorch_model(
                    mll, optimizer=optimizer, options=options, max_retries=1
                )
                if optimizer == fit_gpytorch_scipy:
                    self.assertTrue(
                        any(issubclass(w.category, OptimizationWarning) for w in ws)
                    )
                    self.assertEqual(
                        sum(1 for w in ws if MAX_RETRY_MSG in str(w.message)), 1
                    )
            model = mll.model
            # Make sure excluded params did not change
            self.assertEqual(
                model.likelihood.noise_covar.raw_noise.item(), original_raw_noise
            )
            self.assertEqual(
                model.mean_module.constant.item(), original_mean_module_constant
            )
            # Make sure other params did change
            self.assertGreater(
                model.covar_module.base_kernel.raw_lengthscale.abs().item(), 0.1
            )
            self.assertGreater(model.covar_module.raw_outputscale.abs().item(), 1e-3)

            # test non-default setting for approximate MLL computation
            is_scipy = optimizer == fit_gpytorch_scipy
            mll = self._getModel(double=double)
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                mll = fit_gpytorch_model(
                    mll,
                    optimizer=optimizer,
                    options=options,
                    max_retries=1,
                    approx_mll=is_scipy,
                )
                if is_scipy:
                    self.assertTrue(
                        any(issubclass(w.category, OptimizationWarning) for w in ws)
                    )
                    self.assertEqual(
                        sum(1 for w in ws if MAX_RETRY_MSG in str(w.message)), 1
                    )
            model = mll.model
            # Make sure all of the parameters changed
            self.assertGreater(model.likelihood.raw_noise.abs().item(), 1e-3)
            self.assertLess(model.mean_module.constant.abs().item(), 0.1)
            self.assertGreater(
                model.covar_module.base_kernel.raw_lengthscale.abs().item(), 0.1
            )
            self.assertGreater(model.covar_module.raw_outputscale.abs().item(), 1e-3)
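For reference, a minimal standalone sketch of the optimizer call exercised above, assuming botorch.optim.fit.fit_gpytorch_scipy and a toy SingleTaskGP in place of whatever _getModel builds; the info_dict keys are the ones the test asserts on:

import torch
from botorch.models import SingleTaskGP
from botorch.optim.fit import fit_gpytorch_scipy
from gpytorch.mlls import ExactMarginalLogLikelihood

# Toy training data; fit_gpytorch_scipy returns the fitted mll plus an
# info dict with "fopt", "wall_time" and (optionally) per-step iterations.
train_X = torch.rand(20, 2, dtype=torch.double)
train_Y = torch.sin(train_X).sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)
mll = ExactMarginalLogLikelihood(model.likelihood, model)

mll, info_dict = fit_gpytorch_scipy(
    mll, options={"maxiter": 50}, track_iterations=True
)
print(info_dict["fopt"], info_dict["wall_time"], len(info_dict["iterations"]))

# Passing options={"exclude": ["model.mean_module.constant"]} would freeze
# that parameter during fitting, which is what the exclusion test above checks.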
Example #36
    def test_gp(self):
        for batch_shape, m, dtype, use_octf in itertools.product(
            (torch.Size(), torch.Size([2])),
            (1, 2),
            (torch.float, torch.double),
            (False, True),
        ):
            tkwargs = {"device": self.device, "dtype": dtype}
            octf = Standardize(m=m,
                               batch_shape=batch_shape) if use_octf else None
            model, _ = self._get_model_and_data(batch_shape=batch_shape,
                                                m=m,
                                                outcome_transform=octf,
                                                **tkwargs)
            mll = ExactMarginalLogLikelihood(model.likelihood,
                                             model).to(**tkwargs)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                fit_gpytorch_model(mll, options={"maxiter": 1}, max_retries=1)

            # test init
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            matern_kernel = model.covar_module.base_kernel
            self.assertIsInstance(matern_kernel, MaternKernel)
            self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
            if use_octf:
                self.assertIsInstance(model.outcome_transform, Standardize)

            # test param sizes
            params = dict(model.named_parameters())
            for p in params:
                self.assertEqual(params[p].numel(),
                                 m * torch.tensor(batch_shape).prod().item())

            # test posterior
            # test non batch evaluation
            X = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
            expected_shape = batch_shape + torch.Size([3, m])
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            self.assertEqual(posterior.variance.shape, expected_shape)

            # test adding observation noise
            posterior_pred = model.posterior(X, observation_noise=True)
            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
            self.assertEqual(posterior_pred.mean.shape, expected_shape)
            self.assertEqual(posterior_pred.variance.shape, expected_shape)
            if use_octf:
                # ensure un-transformation is applied
                tmp_tf = model.outcome_transform
                del model.outcome_transform
                pp_tf = model.posterior(X, observation_noise=True)
                model.outcome_transform = tmp_tf
                expected_var = tmp_tf.untransform_posterior(pp_tf).variance
                self.assertTrue(
                    torch.allclose(posterior_pred.variance, expected_var))
            else:
                pvar = posterior_pred.variance
                pvar_exp = _get_pvar_expected(posterior, model, X, m)
                self.assertTrue(
                    torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))

            # test batch evaluation
            X = torch.rand(2, *batch_shape, 3, 1, **tkwargs)
            expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, m])

            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            # test adding observation noise in batch mode
            posterior_pred = model.posterior(X, observation_noise=True)
            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
            self.assertEqual(posterior_pred.mean.shape, expected_shape)
            if use_octf:
                # ensure un-transformation is applied
                tmp_tf = model.outcome_transform
                del model.outcome_transform
                pp_tf = model.posterior(X, observation_noise=True)
                model.outcome_transform = tmp_tf
                expected_var = tmp_tf.untransform_posterior(pp_tf).variance
                self.assertTrue(
                    torch.allclose(posterior_pred.variance, expected_var))
            else:
                pvar = posterior_pred.variance
                pvar_exp = _get_pvar_expected(posterior, model, X, m)
                self.assertTrue(
                    torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))
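As a usage note for the outcome-transform branch above, a minimal sketch, assuming the standard Standardize transform and a SingleTaskGP as a stand-in for the model that _get_model_and_data constructs; posterior() returns values on the original, un-standardized scale:

import torch
from botorch.models import SingleTaskGP
from botorch.models.transforms.outcome import Standardize

# Standardize learns the mean/std of the training targets at fit time;
# model.posterior() automatically un-transforms back to the output scale.
train_X = torch.rand(10, 1, dtype=torch.double)
train_Y = 5.0 + 3.0 * torch.sin(train_X)  # deliberately far from zero mean
model = SingleTaskGP(train_X, train_Y, outcome_transform=Standardize(m=1))

posterior = model.posterior(torch.rand(4, 1, dtype=torch.double))
print(posterior.mean.shape, posterior.variance.shape)  # torch.Size([4, 1]) twice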
Example #37
    def test_MultiTaskGP(self, cuda=False):
        for double in (False, True):
            tkwargs = {
                "device": torch.device("cuda") if cuda else torch.device("cpu"),
                "dtype": torch.double if double else torch.float,
            }
            model = _get_model(**tkwargs)
            self.assertIsInstance(model, MultiTaskGP)
            self.assertIsInstance(model.likelihood, GaussianLikelihood)
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            matern_kernel = model.covar_module.base_kernel
            self.assertIsInstance(matern_kernel, MaternKernel)
            self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.task_covar_module, IndexKernel)
            self.assertEqual(model._rank, 2)
            self.assertEqual(
                model.task_covar_module.covar_factor.shape[-1], model._rank
            )

            # test model fitting
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            mll = fit_gpytorch_model(mll, options={"maxiter": 1})

            # test posterior
            test_x = torch.rand(2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultitaskMultivariateNormal)
            self.assertEqual(posterior_f.mean.shape, torch.Size([2, 2]))
            self.assertEqual(posterior_f.variance.shape, torch.Size([2, 2]))

            # test posterior w/ observation noise
            posterior_o = model.posterior(test_x, observation_noise=True)
            self.assertIsInstance(posterior_o, GPyTorchPosterior)
            self.assertIsInstance(posterior_o.mvn, MultitaskMultivariateNormal)
            self.assertEqual(posterior_o.mean.shape, torch.Size([2, 2]))
            self.assertEqual(posterior_o.variance.shape, torch.Size([2, 2]))

            # test posterior w/ single output index
            posterior_f = model.posterior(test_x, output_indices=[0])
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)
            self.assertEqual(posterior_f.mean.shape, torch.Size([2, 1]))
            self.assertEqual(posterior_f.variance.shape, torch.Size([2, 1]))

            # test posterior w/ bad output index
            with self.assertRaises(ValueError):
                model.posterior(test_x, output_indices=[2])

            # test posterior (batch eval)
            test_x = torch.rand(3, 2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultitaskMultivariateNormal)

            # test that unsupported batch shape MTGPs throw correct error
            with self.assertRaises(ValueError):
                MultiTaskGP(torch.rand(2, 2, 2), torch.rand(2, 1), 0)

            # test that bad feature index throws correct error
            train_X, train_Y = _get_random_mt_data(**tkwargs)
            with self.assertRaises(ValueError):
                MultiTaskGP(train_X, train_Y, 2)

            # test that bad output task throws correct error
            with self.assertRaises(RuntimeError):
                MultiTaskGP(train_X, train_Y, 0, output_tasks=[2])
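To complement the test, a minimal construction sketch, assuming the standard botorch.models.MultiTaskGP API (the _get_model helper used above is not shown in this listing); the task index lives in one column of train_X, and posterior() returns one output column per task:

import torch
from botorch.models import MultiTaskGP

# Two tasks, five observations each; the task index is the last column
# of train_X and task_feature points at it.
X = torch.rand(10, 1, dtype=torch.double)
tasks = torch.cat([torch.zeros(5, 1), torch.ones(5, 1)]).to(X)
train_X = torch.cat([X, tasks], dim=-1)
train_Y = torch.sin(X)
model = MultiTaskGP(train_X, train_Y, task_feature=1)

# Test points carry only the non-task feature; predictions cover all tasks.
posterior = model.posterior(torch.rand(3, 1, dtype=torch.double))
print(posterior.mean.shape)  # torch.Size([3, 2])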