Code Example #1
    def test_computes_exponential_decay_function(self):
        a = torch.tensor([1.0, 2.0]).view(2, 1)
        b = torch.tensor([2.0, 4.0]).view(2, 1)
        lengthscale = 1
        power = 1
        offset = 1

        kernel = ExponentialDecayKernel()
        kernel.initialize(lengthscale=lengthscale, power=power, offset=offset)
        kernel.eval()

        # Hand-tabulated x + x' + lengthscale for every pair of inputs.
        diff = torch.tensor([[4.0, 6.0], [5.0, 7.0]])
        actual = offset + diff.pow(-power)
        res = kernel(a, b).evaluate()

        self.assertLess(torch.norm(res - actual), 1e-5)
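The closed form this test exercises is, per the kernel's docstring in BoTorch, K(x, x') = offset + lengthscale^power / (x + x' + lengthscale)^power; with lengthscale = 1 this reduces to offset + (x + x' + 1)^(-power), which is exactly the hand-tabulated `diff` matrix above. A minimal self-contained sketch of the same check (assuming the kernel lives at `botorch.models.kernels.exponential_decay`, its usual location):

import torch
from botorch.models.kernels.exponential_decay import ExponentialDecayKernel

a = torch.tensor([[1.0], [2.0]])
b = torch.tensor([[2.0], [4.0]])

kernel = ExponentialDecayKernel()
kernel.initialize(lengthscale=1, power=1, offset=1)
kernel.eval()

# offset + (x + x' + lengthscale) ** (-power), computed directly
expected = 1 + (a + b.transpose(-2, -1) + 1) ** (-1.0)
print(torch.allclose(kernel(a, b).evaluate(), expected, atol=1e-5))  # True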
Code Example #2
    def test_subset_active_compute_exponential_decay_function(self):
        a = torch.tensor([1.0, 2.0]).view(2, 1)
        a_p = torch.tensor([3.0, 4.0]).view(2, 1)
        a = torch.cat((a, a_p), 1)
        b = torch.tensor([2.0, 4.0]).view(2, 1)
        lengthscale = 1
        power = 1
        offset = 1

        kernel = ExponentialDecayKernel(active_dims=[0])
        kernel.initialize(lengthscale=lengthscale, power=power, offset=offset)
        kernel.eval()

        # Only column 0 of `a` is active, so the expected matrix is the
        # same x + x' + lengthscale table as in Code Example #1.
        diff = torch.tensor([[4.0, 6.0], [5.0, 7.0]])
        actual = offset + diff.pow(-power)
        res = kernel(a, b).evaluate()

        self.assertLess(torch.norm(res - actual), 1e-5)
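The point of `active_dims=[0]` is that every input column except the first is ignored, so the expected matrix is unchanged from Code Example #1 even though `a` now has two columns. A quick equivalent check (a sketch, under the same assumed import as above):

a = torch.tensor([[1.0, 3.0], [2.0, 4.0]])  # column 1 should be inert
b = torch.tensor([[2.0], [4.0]])

masked = ExponentialDecayKernel(active_dims=[0])
masked.initialize(lengthscale=1, power=1, offset=1)
masked.eval()

plain = ExponentialDecayKernel()
plain.initialize(lengthscale=1, power=1, offset=1)
plain.eval()

# Evaluating the masked kernel on the full input equals evaluating a
# plain kernel on just the selected column.
print(torch.allclose(masked(a, b).evaluate(), plain(a[:, :1], b).evaluate()))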
Code Example #3
    def test_computes_exponential_decay_function_batch(self):
        a = torch.tensor([[1.0, 2.0], [3.0, 4.0]]).view(2, 2, 1)
        b = torch.tensor([[5.0, 6.0], [7.0, 8.0]]).view(2, 2, 1)
        lengthscale = 1
        power = 1
        offset = 1

        kernel = ExponentialDecayKernel(batch_shape=torch.Size([2]))
        kernel.initialize(lengthscale=lengthscale, power=power, offset=offset)
        kernel.eval()

        actual = torch.zeros(2, 2, 2)

        # Batch 0: x + x' + lengthscale for a[0] = [1, 2] vs. b[0] = [5, 6].
        diff = torch.tensor([[7.0, 8.0], [8.0, 9.0]])
        actual[0, :, :] = offset + diff.pow(-power)

        # Batch 1: x + x' + lengthscale for a[1] = [3, 4] vs. b[1] = [7, 8].
        diff = torch.tensor([[11.0, 12.0], [12.0, 13.0]])
        actual[1, :, :] = offset + diff.pow(-power)

        res = kernel(a, b).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)
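With `batch_shape=torch.Size([2])` the kernel keeps one hyperparameter set per batch, and the leading batch dimension of the inputs is evaluated independently. The loop below (a sketch, same assumptions as above) reproduces the batched result one slice at a time:

kernel = ExponentialDecayKernel(batch_shape=torch.Size([2]))
kernel.initialize(lengthscale=1, power=1, offset=1)
kernel.eval()

a = torch.tensor([[1.0, 2.0], [3.0, 4.0]]).view(2, 2, 1)
b = torch.tensor([[5.0, 6.0], [7.0, 8.0]]).view(2, 2, 1)

batched = kernel(a, b).evaluate()  # shape (2, 2, 2)
for i in range(2):
    # offset + (x + x' + lengthscale) ** (-power) for batch slice i
    expected = 1 + (a[i] + b[i].transpose(-2, -1) + 1) ** (-1.0)
    print(torch.allclose(batched[i], expected, atol=1e-5))  # True, True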
Code Example #4
# Imports needed to run this helper on its own (paths per BoTorch/GPyTorch).
from typing import Dict, Optional, Tuple

import torch
from botorch.models.kernels.downsampling import DownsamplingKernel
from botorch.models.kernels.exponential_decay import ExponentialDecayKernel
from botorch.models.kernels.linear_truncated_fidelity import (
    LinearTruncatedFidelityKernel,
)
from gpytorch.kernels import ProductKernel, RBFKernel, ScaleKernel
from gpytorch.priors.torch_priors import GammaPrior


def _setup_multifidelity_covar_module(
    dim: int,
    aug_batch_shape: torch.Size,
    iteration_fidelity: Optional[int],
    data_fidelity: Optional[int],
    linear_truncated: bool,
    nu: float,
) -> Tuple[ScaleKernel, Dict]:
    """Helper function to get the covariance module and associated subset_batch_dict
    for the multifidelity setting.

    Args:
        dim: The dimensionality of the training data.
        aug_batch_shape: The output-augmented batch shape as defined in
            `BatchedMultiOutputGPyTorchModel`.
        iteration_fidelity: The column index for the training iteration fidelity
            parameter (optional).
        data_fidelity: The column index for the downsampling fidelity parameter
            (optional).
        linear_truncated: If True, use a `LinearTruncatedFidelityKernel` instead
            of the default kernel.
        nu: The smoothness parameter for the Matern kernel: either 1/2, 3/2, or
            5/2. Only used when `linear_truncated=True`.

    Returns:
        The covariance module and subset_batch_dict.
    """

    if iteration_fidelity is not None and iteration_fidelity < 0:
        iteration_fidelity = dim + iteration_fidelity
    if data_fidelity is not None and data_fidelity < 0:
        data_fidelity = dim + data_fidelity

    if linear_truncated:
        fidelity_dims = [
            i for i in (iteration_fidelity, data_fidelity) if i is not None
        ]
        kernel = LinearTruncatedFidelityKernel(
            fidelity_dims=fidelity_dims,
            dimension=dim,
            nu=nu,
            batch_shape=aug_batch_shape,
            power_prior=GammaPrior(3.0, 3.0),
        )
    else:
        active_dimsX = [
            i for i in range(dim)
            if i not in {iteration_fidelity, data_fidelity}
        ]
        kernel = RBFKernel(
            ard_num_dims=len(active_dimsX),
            batch_shape=aug_batch_shape,
            lengthscale_prior=GammaPrior(3.0, 6.0),
            active_dims=active_dimsX,
        )
        additional_kernels = []
        if iteration_fidelity is not None:
            exp_kernel = ExponentialDecayKernel(
                batch_shape=aug_batch_shape,
                lengthscale_prior=GammaPrior(3.0, 6.0),
                offset_prior=GammaPrior(3.0, 6.0),
                power_prior=GammaPrior(3.0, 6.0),
                active_dims=[iteration_fidelity],
            )
            additional_kernels.append(exp_kernel)
        if data_fidelity is not None:
            ds_kernel = DownsamplingKernel(
                batch_shape=aug_batch_shape,
                offset_prior=GammaPrior(3.0, 6.0),
                power_prior=GammaPrior(3.0, 6.0),
                active_dims=[data_fidelity],
            )
            additional_kernels.append(ds_kernel)
        kernel = ProductKernel(kernel, *additional_kernels)

    covar_module = ScaleKernel(
        kernel,
        batch_shape=aug_batch_shape,
        outputscale_prior=GammaPrior(2.0, 0.15),
    )

    if linear_truncated:
        subset_batch_dict = {
            "covar_module.base_kernel.raw_power": -2,
            "covar_module.base_kernel.covar_module_unbiased.raw_lengthscale":
            -3,
            "covar_module.base_kernel.covar_module_biased.raw_lengthscale": -3,
        }
    else:
        subset_batch_dict = {
            "covar_module.base_kernel.kernels.0.raw_lengthscale": -3,
            "covar_module.base_kernel.kernels.1.raw_power": -2,
            "covar_module.base_kernel.kernels.1.raw_offset": -2,
        }
        if iteration_fidelity is not None:
            subset_batch_dict = {
                "covar_module.base_kernel.kernels.1.raw_lengthscale": -3,
                **subset_batch_dict,
            }
            if data_fidelity is not None:
                subset_batch_dict = {
                    "covar_module.base_kernel.kernels.2.raw_power": -2,
                    "covar_module.base_kernel.kernels.2.raw_offset": -2,
                    **subset_batch_dict,
                }

    return covar_module, subset_batch_dict
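A hedged usage sketch of the helper above: building the non-truncated module for a 3-dimensional problem whose last column is a downsampling fidelity. The argument values are illustrative only, not taken from the source:

covar_module, subset_batch_dict = _setup_multifidelity_covar_module(
    dim=3,
    aug_batch_shape=torch.Size([]),
    iteration_fidelity=None,
    data_fidelity=-1,  # negative index; normalized to column 2 inside
    linear_truncated=False,
    nu=2.5,  # unused when linear_truncated=False
)
# The result is ScaleKernel(ProductKernel(RBFKernel on dims [0, 1],
# DownsamplingKernel on dim [2])).
print(type(covar_module.base_kernel).__name__)  # ProductKernel
print(sorted(subset_batch_dict))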
Code Example #5
    def test_initialize_lengthscale(self):
        kernel = ExponentialDecayKernel()
        kernel.initialize(lengthscale=1)
        actual_value = torch.tensor(1.0).view_as(kernel.lengthscale)
        self.assertLess(torch.norm(kernel.lengthscale - actual_value), 1e-5)
Code Example #6
    def create_kernel_no_ard(self, **kwargs):
        return ExponentialDecayKernel(**kwargs)
Code Example #7
    def test_initialize_offset_prior(self):
        kernel = ExponentialDecayKernel()
        kernel.offset_prior = NormalPrior(1, 1)
        self.assertTrue(isinstance(kernel.offset_prior, NormalPrior))
        kernel2 = ExponentialDecayKernel(offset_prior=GammaPrior(1, 1))
        self.assertTrue(isinstance(kernel2.offset_prior, GammaPrior))
Code Example #8
    def test_initialize_power_batch(self):
        kernel = ExponentialDecayKernel(batch_shape=torch.Size([2]))
        power_init = torch.tensor([1.0, 2.0])
        kernel.initialize(power=power_init)
        actual_value = power_init.view_as(kernel.power)
        self.assertLess(torch.norm(kernel.power - actual_value), 1e-5)
Code Example #9
    def test_initialize_power(self):
        kernel = ExponentialDecayKernel()
        kernel.initialize(power=1)
        actual_value = torch.tensor(1.0).view_as(kernel.power)
        self.assertLess(torch.norm(kernel.power - actual_value), 1e-5)
Code Example #10
    def test_initialize_offset_batch(self):
        kernel = ExponentialDecayKernel(batch_shape=torch.Size([2]))
        off_init = torch.tensor([1.0, 2.0])
        kernel.initialize(offset=off_init)
        actual_value = off_init.view_as(kernel.offset)
        self.assertLess(torch.norm(kernel.offset - actual_value), 1e-5)
Code Example #11
    def test_initialize_offset(self):
        kernel = ExponentialDecayKernel()
        kernel.initialize(offset=1)
        actual_value = torch.tensor(1.0).view_as(kernel.offset)
        self.assertLess(torch.norm(kernel.offset - actual_value), 1e-5)
Code Example #12
    def test_initialize_lengthscale_batch(self):
        kernel = ExponentialDecayKernel(batch_shape=torch.Size([2]))
        ls_init = torch.tensor([1.0, 2.0])
        kernel.initialize(lengthscale=ls_init)
        actual_value = ls_init.view_as(kernel.lengthscale)
        self.assertLess(torch.norm(kernel.lengthscale - actual_value), 1e-5)
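One detail worth noting about these `initialize` tests: GPyTorch kernels store unconstrained `raw_*` tensors (the names that appear in `subset_batch_dict` in Code Example #4) and expose `lengthscale`, `power`, and `offset` as constrained views of them. A sketch of that correspondence, assuming GPyTorch's standard `raw_offset` / `raw_offset_constraint` naming convention:

kernel = ExponentialDecayKernel()
kernel.initialize(offset=1.0)

# `offset` is the constrained transform of the unconstrained `raw_offset`;
# initialize() works backwards through the constraint to set the raw value.
constrained = kernel.raw_offset_constraint.transform(kernel.raw_offset)
print(torch.allclose(constrained, kernel.offset))  # True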