def test_compute_linear_truncated_kernel_no_batch(self):
    """Unbatched kernel values match a hand-computed reference.

    Compares kernel(x1, x2) against bias * Matern(x1, x2) for every
    supported nu and for one- vs. two-dimensional fidelity settings.
    """
    x1 = torch.tensor([[1, 0.1, 0.2], [2, 0.3, 0.4]])
    x2 = torch.tensor([[3, 0.5, 0.6], [4, 0.7, 0.8]])
    # Precomputed bias contribution of the fidelity columns.
    bias_a = torch.tensor([[0.3584, 0.1856], [0.2976, 0.1584]])
    for nu in {0.5, 1.5, 2.5}:
        for fidelity_dims in ([2], [1, 2]):
            kernel = LinearTruncatedFidelityKernel(
                fidelity_dims=fidelity_dims, dimension=3, nu=nu
            )
            # Pin the power parameter so the reference values apply.
            kernel.power = 1
            if len(fidelity_dims) == 1:
                matern_dims = [0, 1]
                bias = 1 + bias_a
            else:
                matern_dims = [0]
                bias_b = torch.tensor([[0.4725, 0.2889], [0.4025, 0.2541]])
                bias_c = torch.tensor([[0.1685, 0.0531], [0.1168, 0.0386]])
                bias = 1 + bias_a + bias_b + bias_c

            matern_term = MaternKernel(nu=nu, active_dims=matern_dims)(
                x1, x2
            ).evaluate()
            expected = bias * matern_term
            self.assertLess(torch.norm(kernel(x1, x2).evaluate() - expected), 1e-4)
            # Diagonal mode must agree with the diagonal of the full matrix.
            self.assertLess(
                torch.norm(kernel(x1, x2, diag=True) - expected.diag()), 1e-4
            )
    # last_dim_is_batch=True is unsupported and must raise.
    with self.assertRaises(NotImplementedError):
        kernel(x1, x2, diag=True, last_dim_is_batch=True)
# Example 2
    def test_compute_linear_truncated_kernel_no_batch(self):
        """Unbatched kernel values match a precomputed reference for all nu."""
        x1 = torch.tensor([[1.0, 0.1, 0.2], [2.0, 0.3, 0.4]])
        x2 = torch.tensor([[3.0, 0.5, 0.6], [4.0, 0.7, 0.8]])
        # Precomputed fidelity bias term shared by both fidelity settings.
        bias_a = torch.tensor([[0.3584, 0.1856], [0.2976, 0.1584]])
        for nu, fidelity_dims in itertools.product((0.5, 1.5, 2.5), ([2], [1, 2])):
            kernel = LinearTruncatedFidelityKernel(
                fidelity_dims=fidelity_dims, dimension=3, nu=nu
            )
            # Fix the power parameter so the reference values apply.
            kernel.power = 1
            if len(fidelity_dims) == 1:
                matern_dims = [0, 1]
                bias = 1 + bias_a
            else:
                matern_dims = [0]
                bias_b = torch.tensor([[0.4725, 0.2889], [0.4025, 0.2541]])
                bias_c = torch.tensor([[0.1685, 0.0531], [0.1168, 0.0386]])
                bias = 1 + bias_a + bias_b + bias_c

            matern_term = MaternKernel(nu=nu, active_dims=matern_dims)(
                x1, x2
            ).evaluate()
            expected = bias * matern_term
            self.assertLess(torch.norm(kernel(x1, x2).evaluate() - expected), 1e-4)
# Example 3
 def test_initialize_lengthscale_prior(self):
     """Lengthscale priors default to GammaPrior and honor explicit overrides.

     Uses assertIsInstance rather than assertTrue(isinstance(...)) so a
     failure reports the actual type instead of just "False is not true".
     """
     kernel = LinearTruncatedFidelityKernel(fidelity_dims=[1, 2], dimension=3)
     # Both internal Matern kernels get a GammaPrior by default.
     self.assertIsInstance(
         kernel.covar_module_unbiased.lengthscale_prior, GammaPrior
     )
     self.assertIsInstance(kernel.covar_module_biased.lengthscale_prior, GammaPrior)
     # An explicit unbiased prior is attached to the unbiased kernel.
     kernel2 = LinearTruncatedFidelityKernel(
         fidelity_dims=[1, 2],
         dimension=3,
         lengthscale_prior_unbiased=NormalPrior(1, 1),
     )
     self.assertIsInstance(
         kernel2.covar_module_unbiased.lengthscale_prior, NormalPrior
     )
     # An explicit biased prior is attached to the biased kernel.
     kernel2 = LinearTruncatedFidelityKernel(
         fidelity_dims=[1, 2],
         dimension=3,
         lengthscale_prior_biased=NormalPrior(1, 1),
     )
     self.assertIsInstance(
         kernel2.covar_module_biased.lengthscale_prior, NormalPrior
     )
 def test_initialize_power_batch(self):
     """A batched kernel accepts a batch of power values via initialize()."""
     batch_kernel = LinearTruncatedFidelityKernel(
         fidelity_dims=[1, 2], dimension=3, batch_shape=torch.Size([2])
     )
     init_value = torch.tensor([1, 2], dtype=torch.float)
     batch_kernel.initialize(power=init_value)
     # initialize() may reshape; compare against the view-matched value.
     expected = init_value.view_as(batch_kernel.power)
     self.assertLess(torch.norm(batch_kernel.power - expected), 1e-5)
 def test_raise_init_errors(self):
     """Invalid constructor arguments raise the documented exceptions."""
     bad_configs = [
         # missing dimension when fidelity_dims is given
         (UnsupportedError, {"fidelity_dims": [2]}),
         # more fidelity dims than the input dimension supports
         (UnsupportedError, {"fidelity_dims": [0, 1, 2], "dimension": 3}),
         # duplicate fidelity dims
         (ValueError, {"fidelity_dims": [2, 2], "dimension": 3}),
         # unsupported nu value
         (ValueError, {"fidelity_dims": [2], "dimension": 2, "nu": 1}),
     ]
     for expected_error, kwargs in bad_configs:
         with self.assertRaises(expected_error):
             LinearTruncatedFidelityKernel(**kwargs)
# Example 6
    def test_compute_linear_truncated_kernel_with_batch(self):
        """Batched evaluation matches a hand-computed reference (batch_shape=[2])."""
        x1 = torch.tensor(
            [[[1.0, 0.1, 0.2], [3.0, 0.3, 0.4]], [[5.0, 0.5, 0.6], [7.0, 0.7, 0.8]]]
        )
        x2 = torch.tensor(
            [[[2.0, 0.8, 0.7], [4.0, 0.6, 0.5]], [[6.0, 0.4, 0.3], [8.0, 0.2, 0.1]]]
        )
        # Precomputed fidelity bias term shared by both fidelity settings.
        bias_a = torch.tensor(
            [[[0.2736, 0.44], [0.2304, 0.36]], [[0.3304, 0.3816], [0.1736, 0.1944]]]
        )
        batch_shape = torch.Size([2])
        for nu, fidelity_dims in itertools.product((0.5, 1.5, 2.5), ([2], [1, 2])):
            kernel = LinearTruncatedFidelityKernel(
                fidelity_dims=fidelity_dims,
                dimension=3,
                nu=nu,
                batch_shape=batch_shape,
            )
            # Fix the power parameter so the reference values apply.
            kernel.power = 1
            if len(fidelity_dims) == 1:
                matern_dims = [0, 1]
                bias = 1 + bias_a
            else:
                matern_dims = [0]
                bias_b = torch.tensor(
                    [
                        [[0.0527, 0.167], [0.0383, 0.1159]],
                        [[0.1159, 0.167], [0.0383, 0.0527]],
                    ]
                )
                bias_c = torch.tensor(
                    [
                        [[0.1944, 0.3816], [0.1736, 0.3304]],
                        [[0.36, 0.44], [0.2304, 0.2736]],
                    ]
                )
                bias = 1 + bias_a + bias_b + bias_c

            matern_term = MaternKernel(
                nu=nu, active_dims=matern_dims, batch_shape=batch_shape
            )(x1, x2).evaluate()
            expected = bias * matern_term
            self.assertLess(torch.norm(kernel(x1, x2).evaluate() - expected), 1e-4)
            # Diagonal mode must agree with the batch diagonal of the full matrix.
            expected_diag = torch.diagonal(expected, dim1=-1, dim2=-2)
            self.assertLess(
                torch.norm(kernel(x1, x2, diag=True) - expected_diag), 1e-4
            )
        # last_dim_is_batch=True is unsupported and must raise.
        with self.assertRaises(NotImplementedError):
            kernel(x1, x2, diag=True, last_dim_is_batch=True)
 def test_active_dims_list(self):
     """active_dims as a list selects the same columns as manual slicing."""
     selected = [0, 2, 4, 6]
     kernel = LinearTruncatedFidelityKernel(
         fidelity_dims=[1, 2], dimension=10, active_dims=selected
     )
     x = self.create_data_no_batch()
     covar_mat = kernel(x).evaluate_kernel().evaluate()
     # Reference: slice the columns by hand and use a plain 4-d kernel.
     reference_kernel = LinearTruncatedFidelityKernel(
         fidelity_dims=[1, 2], dimension=4
     )
     covar_mat_actual = (
         reference_kernel(x[:, selected]).evaluate_kernel().evaluate()
     )
     relative_err = (
         torch.norm(covar_mat - covar_mat_actual) / covar_mat_actual.norm()
     )
     self.assertLess(relative_err, 1e-4)
 def test_active_dims_range(self):
     """active_dims as a contiguous range matches manual column slicing."""
     active_dims = list(range(3, 9))
     kernel = LinearTruncatedFidelityKernel(
         fidelity_dims=[1, 2], dimension=10, active_dims=active_dims
     )
     x = self.create_data_no_batch()
     covar_mat = kernel(x).evaluate_kernel().evaluate()
     # Reference: slice the columns by hand and use a plain 6-d kernel.
     reference_kernel = LinearTruncatedFidelityKernel(
         fidelity_dims=[1, 2], dimension=6
     )
     covar_mat_actual = (
         reference_kernel(x[:, active_dims]).evaluate_kernel().evaluate()
     )
     relative_err = (
         torch.norm(covar_mat - covar_mat_actual) / covar_mat_actual.norm()
     )
     self.assertLess(relative_err, 1e-4)
    def test_compute_linear_truncated_kernel_with_batch(self):
        """Batched kernel values and diagonals match a precomputed reference."""
        x1 = torch.tensor(
            [[[1.0, 0.1, 0.2], [3.0, 0.3, 0.4]], [[5.0, 0.5, 0.6], [7.0, 0.7, 0.8]]]
        )
        x2 = torch.tensor(
            [[[2.0, 0.8, 0.7], [4.0, 0.6, 0.5]], [[6.0, 0.4, 0.3], [8.0, 0.2, 0.1]]]
        )
        # Precomputed fidelity bias term shared by both fidelity settings.
        bias_a = torch.tensor(
            [[[0.2736, 0.4400], [0.2304, 0.3600]], [[0.3304, 0.3816], [0.1736, 0.1944]]]
        )
        batch_shape = torch.Size([2])
        for nu in {0.5, 1.5, 2.5}:
            for fidelity_dims in ([2], [1, 2]):
                kernel = LinearTruncatedFidelityKernel(
                    fidelity_dims=fidelity_dims,
                    dimension=3,
                    nu=nu,
                    batch_shape=batch_shape,
                )
                # Fix the power parameter so the reference values apply.
                kernel.power = 1
                if len(fidelity_dims) == 1:
                    matern_dims = [0, 1]
                    bias = 1 + bias_a
                else:
                    matern_dims = [0]
                    bias_b = torch.tensor(
                        [
                            [[0.0527, 0.1670], [0.0383, 0.1159]],
                            [[0.1159, 0.1670], [0.0383, 0.0527]],
                        ]
                    )
                    bias_c = torch.tensor(
                        [
                            [[0.1944, 0.3816], [0.1736, 0.3304]],
                            [[0.3600, 0.4400], [0.2304, 0.2736]],
                        ]
                    )
                    bias = 1 + bias_a + bias_b + bias_c

                matern_term = MaternKernel(
                    nu=nu, active_dims=matern_dims, batch_shape=batch_shape
                )(x1, x2).evaluate()
                expected = bias * matern_term
                self.assertLess(
                    torch.norm(kernel(x1, x2).evaluate() - expected), 1e-4
                )
                # Diagonal mode must agree with the batch diagonal.
                expected_diag = torch.diagonal(expected, dim1=-1, dim2=-2)
                self.assertLess(
                    torch.norm(kernel(x1, x2, diag=True) - expected_diag), 1e-4
                )
        # last_dim_is_batch=True is unsupported and must raise.
        with self.assertRaises(NotImplementedError):
            kernel(x1, x2, diag=True, last_dim_is_batch=True)
 def test_initialize_covar_module(self):
     """Covar modules default to Matern and accept assignment or overrides.

     Uses assertIsInstance rather than assertTrue(isinstance(...)) so a
     failure reports the actual type instead of just "False is not true".
     """
     kernel = LinearTruncatedFidelityKernel(fidelity_dims=[1, 2], dimension=3)
     # Defaults are MaternKernel for both internal modules.
     self.assertIsInstance(kernel.covar_module_unbiased, MaternKernel)
     self.assertIsInstance(kernel.covar_module_biased, MaternKernel)
     # Post-hoc assignment replaces them.
     kernel.covar_module_unbiased = RBFKernel()
     kernel.covar_module_biased = RBFKernel()
     self.assertIsInstance(kernel.covar_module_unbiased, RBFKernel)
     self.assertIsInstance(kernel.covar_module_biased, RBFKernel)
     # Constructor overrides are honored.
     kernel2 = LinearTruncatedFidelityKernel(
         fidelity_dims=[1, 2],
         dimension=3,
         covar_module_unbiased=RBFKernel(),
         covar_module_biased=RBFKernel(),
     )
     self.assertIsInstance(kernel2.covar_module_unbiased, RBFKernel)
     self.assertIsInstance(kernel2.covar_module_biased, RBFKernel)
# Example 11
    def test_compute_linear_truncated_kernel_with_batch(self):
        """Batched LinearTruncatedFidelityKernel matches a precomputed reference.

        Compares kernel(x1, x2) against `t * MaternKernel(...)`, where `t` is a
        hand-computed fidelity bias factor, for all supported nu values and for
        one- and two-dimensional fidelity settings (batch_shape=[2]).
        """
        x1 = torch.tensor([1, 0.1, 0.2, 3, 0.3, 0.4, 5, 0.5, 0.6, 7, 0.7, 0.8],
                          dtype=torch.float).view(2, 2, 3)
        x2 = torch.tensor([2, 0.8, 0.7, 4, 0.6, 0.5, 6, 0.4, 0.3, 8, 0.2, 0.1],
                          dtype=torch.float).view(2, 2, 3)
        # Precomputed bias contribution of the fidelity columns (both settings).
        t_1 = torch.tensor(
            [0.2736, 0.44, 0.2304, 0.36, 0.3304, 0.3816, 0.1736, 0.1944],
            dtype=torch.float,
        ).view(2, 2, 2)
        batch_shape = torch.Size([2])
        for nu in {0.5, 1.5, 2.5}:
            for fidelity_dims in ([2], [1, 2]):
                kernel = LinearTruncatedFidelityKernel(
                    fidelity_dims=fidelity_dims,
                    dimension=3,
                    nu=nu,
                    batch_shape=batch_shape,
                )
                # Fix the power parameter so the reference values apply.
                kernel.power = 1
                if len(fidelity_dims) > 1:
                    # Columns 1 and 2 are fidelities, so only column 0 feeds
                    # the Matern reference; extra bias terms t_2/t_3 apply.
                    active_dimsM = [0]
                    t_2 = torch.tensor(
                        [
                            0.0527, 0.167, 0.0383, 0.1159, 0.1159, 0.167,
                            0.0383, 0.0527
                        ],
                        dtype=torch.float,
                    ).view(2, 2, 2)
                    t_3 = torch.tensor(
                        [
                            0.1944, 0.3816, 0.1736, 0.3304, 0.36, 0.44, 0.2304,
                            0.2736
                        ],
                        dtype=torch.float,
                    ).view(2, 2, 2)
                    t = 1 + t_1 + t_2 + t_3
                else:
                    # Single fidelity column (2); columns 0-1 feed the Matern.
                    active_dimsM = [0, 1]
                    t = 1 + t_1

                matern_ker = MaternKernel(nu=nu,
                                          active_dims=active_dimsM,
                                          batch_shape=batch_shape)
                matern_term = matern_ker(x1, x2).evaluate()
                actual = t * matern_term
                res = kernel(x1, x2).evaluate()
                self.assertLess(torch.norm(res - actual), 1e-4)
# Example 12
 def test_error_on_fidelity_only(self):
     """Evaluating on inputs that consist solely of fidelity columns fails."""
     fid_x1 = torch.tensor([[0.1], [0.3]])
     fid_x2 = torch.tensor([[0.5], [0.7]])
     # dimension=1 with fidelity_dims=[0] leaves no regular input columns.
     kernel = LinearTruncatedFidelityKernel(
         fidelity_dims=[0], dimension=1, nu=2.5
     )
     with self.assertRaises(RuntimeError):
         kernel(fid_x1, fid_x2).evaluate()
# Example 13
def _setup_multifidelity_covar_module(
    dim: int,
    aug_batch_shape: torch.Size,
    iteration_fidelity: Optional[int],
    data_fidelity: Optional[int],
    linear_truncated: bool,
    nu: float,
) -> Tuple[ScaleKernel, Dict]:
    """Helper function to get the covariance module and associated subset_batch_dict
    for the multifidelity setting.

    Args:
        dim: The dimensionality of the training data.
        aug_batch_shape: The output-augmented batch shape as defined in
            `BatchedMultiOutputGPyTorchModel`.
        iteration_fidelity: The column index for the training iteration fidelity
            parameter (optional).
        data_fidelity: The column index for the downsampling fidelity parameter
            (optional).
        linear_truncated: If True, use a `LinearTruncatedFidelityKernel` instead
            of the default kernel.
        nu: The smoothness parameter for the Matern kernel: either 1/2, 3/2, or
            5/2. Only used when `linear_truncated=True`.

    Returns:
        The covariance module and subset_batch_dict.
    """

    # Normalize negative (from-the-end) fidelity indices to absolute columns.
    if iteration_fidelity is not None and iteration_fidelity < 0:
        iteration_fidelity = dim + iteration_fidelity
    if data_fidelity is not None and data_fidelity < 0:
        data_fidelity = dim + data_fidelity

    if linear_truncated:
        # Collect whichever fidelity columns were provided (iteration first).
        fidelity_dims = [
            i for i in (iteration_fidelity, data_fidelity) if i is not None
        ]
        kernel = LinearTruncatedFidelityKernel(
            fidelity_dims=fidelity_dims,
            dimension=dim,
            nu=nu,
            batch_shape=aug_batch_shape,
            power_prior=GammaPrior(3.0, 3.0),
        )
    else:
        # Default construction: an RBF kernel over the non-fidelity columns,
        # multiplied by one fidelity kernel per provided fidelity column.
        active_dimsX = [
            i for i in range(dim)
            if i not in {iteration_fidelity, data_fidelity}
        ]
        kernel = RBFKernel(
            ard_num_dims=len(active_dimsX),
            batch_shape=aug_batch_shape,
            lengthscale_prior=GammaPrior(3.0, 6.0),
            active_dims=active_dimsX,
        )
        additional_kernels = []
        if iteration_fidelity is not None:
            exp_kernel = ExponentialDecayKernel(
                batch_shape=aug_batch_shape,
                lengthscale_prior=GammaPrior(3.0, 6.0),
                offset_prior=GammaPrior(3.0, 6.0),
                power_prior=GammaPrior(3.0, 6.0),
                active_dims=[iteration_fidelity],
            )
            additional_kernels.append(exp_kernel)
        if data_fidelity is not None:
            ds_kernel = DownsamplingKernel(
                batch_shape=aug_batch_shape,
                offset_prior=GammaPrior(3.0, 6.0),
                power_prior=GammaPrior(3.0, 6.0),
                active_dims=[data_fidelity],
            )
            additional_kernels.append(ds_kernel)
        kernel = ProductKernel(kernel, *additional_kernels)

    covar_module = ScaleKernel(kernel,
                               batch_shape=aug_batch_shape,
                               outputscale_prior=GammaPrior(2.0, 0.15))

    # subset_batch_dict maps raw-parameter names to the (negative) batch
    # dimension to subset when extracting a single-output model.
    if linear_truncated:
        subset_batch_dict = {
            "covar_module.base_kernel.raw_power": -2,
            "covar_module.base_kernel.covar_module_unbiased.raw_lengthscale":
            -3,
            "covar_module.base_kernel.covar_module_biased.raw_lengthscale": -3,
        }
    else:
        # Base entries assume the second factor (kernels.1) has power/offset
        # parameters; this holds for both ExponentialDecayKernel and
        # DownsamplingKernel, whichever ended up in that slot.
        subset_batch_dict = {
            "covar_module.base_kernel.kernels.0.raw_lengthscale": -3,
            "covar_module.base_kernel.kernels.1.raw_power": -2,
            "covar_module.base_kernel.kernels.1.raw_offset": -2,
        }
        if iteration_fidelity is not None:
            # kernels.1 is the ExponentialDecayKernel, which also has a
            # lengthscale parameter.
            subset_batch_dict = {
                "covar_module.base_kernel.kernels.1.raw_lengthscale": -3,
                **subset_batch_dict,
            }
            if data_fidelity is not None:
                # With both fidelities, the DownsamplingKernel is kernels.2.
                subset_batch_dict = {
                    "covar_module.base_kernel.kernels.2.raw_power": -2,
                    "covar_module.base_kernel.kernels.2.raw_offset": -2,
                    **subset_batch_dict,
                }

    return covar_module, subset_batch_dict
 def create_kernel_no_ard(self, **kwargs):
     """Build a non-ARD LinearTruncatedFidelityKernel for the shared base tests."""
     return LinearTruncatedFidelityKernel(fidelity_dims=[1, 2], dimension=3, **kwargs)
 def test_initialize_power(self):
     """initialize(power=...) stores the given scalar value."""
     kernel = LinearTruncatedFidelityKernel(fidelity_dims=[1, 2], dimension=3)
     kernel.initialize(power=1)
     # initialize() may reshape; compare against the view-matched value.
     expected = torch.tensor(1, dtype=torch.float).view_as(kernel.power)
     self.assertLess(torch.norm(kernel.power - expected), 1e-5)
 def test_initialize_power_prior(self):
     """A custom power_prior passed to the constructor is attached to the kernel."""
     kernel = LinearTruncatedFidelityKernel(
         fidelity_dims=[1, 2], dimension=3, power_prior=NormalPrior(1, 1)
     )
     # assertIsInstance reports the actual type on failure, unlike
     # assertTrue(isinstance(...)) which only reports "False is not true".
     self.assertIsInstance(kernel.power_prior, NormalPrior)