Exemplo n.º 1
0
    def test_last_dim_is_batch(self):
        """With `last_dim_is_batch=True`, a (2, 3) input is treated as 3 batches."""
        x1 = (
            torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.5]])
            .view(3, 2)
            .transpose(-1, -2)
        )
        x2 = (
            torch.tensor([[0.5, 0.6], [0.7, 0.8], [0.6, 0.6]])
            .view(3, 2)
            .transpose(-1, -2)
        )

        kernel = DownsamplingKernel()
        kernel.initialize(power=1, offset=1)
        kernel.eval()
        result = kernel(x1, x2, last_dim_is_batch=True).evaluate()

        # Hand-computed (1 - x1)(1 - x2) products, one 2x2 slab per batch;
        # expected entry = offset + prod ** (power + 1) = 1 + prod ** 2.
        slabs = [
            torch.tensor([[0.45, 0.36], [0.4, 0.32]]),
            torch.tensor([[0.21, 0.14], [0.18, 0.12]]),
            torch.tensor([[0.2, 0.2], [0.2, 0.2]]),
        ]
        expected = torch.stack([1 + s.pow(2) for s in slabs])

        self.assertLess(torch.norm(result - expected), 1e-5)
Exemplo n.º 2
0
    def test_subset_computes_active_downsampling_function_batch(self):
        """`active_dims=[0]` must ignore the duplicated second input column."""
        base = torch.tensor([[0.1, 0.2, 0.2], [0.3, 0.4, 0.2], [0.5, 0.5, 0.5]]).view(
            3, 3, 1
        )
        # Duplicate the column along dim 2; only column 0 is active.
        x1 = torch.cat((base, base.clone()), 2)
        x2 = torch.tensor([[0.5, 0.6, 0.1], [0.7, 0.8, 0.2], [0.6, 0.6, 0.5]]).view(
            3, 3, 1
        )
        kernel = DownsamplingKernel(batch_shape=torch.Size([3]), active_dims=[0])
        kernel.initialize(power=1, offset=1)
        kernel.eval()
        result = kernel(x1, x2).evaluate()

        # Hand-computed (1 - x1)(1 - x2) products for each batch element;
        # expected entry = offset + prod ** (power + 1) = 1 + prod ** 2.
        slabs = [
            torch.tensor(
                [[0.45, 0.36, 0.81], [0.4, 0.32, 0.72], [0.4, 0.32, 0.72]]
            ),
            torch.tensor(
                [[0.21, 0.14, 0.56], [0.18, 0.12, 0.48], [0.24, 0.16, 0.64]]
            ),
            torch.tensor(
                [[0.2, 0.2, 0.25], [0.2, 0.2, 0.25], [0.2, 0.2, 0.25]]
            ),
        ]
        expected = torch.stack([1 + s.pow(2) for s in slabs])
        self.assertLess(torch.norm(result - expected), 1e-5)
Exemplo n.º 3
0
    def test_computes_downsampling_function(self):
        """Plain (non-batch) inputs produce the closed-form kernel matrix."""
        x1 = torch.tensor([0.1, 0.2]).view(2, 1)
        x2 = torch.tensor([0.2, 0.4]).view(2, 1)

        kernel = DownsamplingKernel()
        kernel.initialize(power=1, offset=1)
        kernel.eval()

        # Hand-computed (1 - x1)(1 - x2) products;
        # expected entry = offset + prod ** (power + 1) = 1 + prod ** 2.
        expected = 1 + torch.tensor([[0.72, 0.54], [0.64, 0.48]]).pow(2)
        result = kernel(x1, x2).evaluate()

        self.assertLess(torch.norm(result - expected), 1e-5)
Exemplo n.º 4
0
    def test_diag_calculation(self):
        """`diag=True` returns only the diagonal of the full kernel matrix."""
        x1 = torch.tensor([0.1, 0.2]).view(2, 1)
        x2 = torch.tensor([0.2, 0.4]).view(2, 1)

        kernel = DownsamplingKernel()
        kernel.initialize(power=1, offset=1)
        kernel.eval()

        # Full matrix of offset + ((1 - x1)(1 - x2)) ** (power + 1) = 1 + prod ** 2.
        full = 1 + torch.tensor([[0.72, 0.54], [0.64, 0.48]]).pow(2)
        result = kernel(x1, x2, diag=True)

        self.assertLess(torch.norm(result - torch.diag(full)), 1e-5)
Exemplo n.º 5
0
    def test_computes_downsampling_function_batch(self):
        """Batched (3, 2, 1) inputs give one 2x2 kernel slab per batch element."""
        x1 = torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.5]]).view(3, 2, 1)
        x2 = torch.tensor([[0.5, 0.6], [0.7, 0.8], [0.6, 0.6]]).view(3, 2, 1)

        kernel = DownsamplingKernel(batch_shape=torch.Size([3]))
        kernel.initialize(power=1, offset=1)
        kernel.eval()
        result = kernel(x1, x2).evaluate()

        # Hand-computed (1 - x1)(1 - x2) products for each batch element;
        # expected entry = offset + prod ** (power + 1) = 1 + prod ** 2.
        slabs = [
            torch.tensor([[0.45, 0.36], [0.4, 0.32]]),
            torch.tensor([[0.21, 0.14], [0.18, 0.12]]),
            torch.tensor([[0.2, 0.2], [0.2, 0.2]]),
        ]
        expected = torch.stack([1 + s.pow(2) for s in slabs])
        self.assertLess(torch.norm(result - expected), 1e-5)
Exemplo n.º 6
0
def _setup_multifidelity_covar_module(
    dim: int,
    aug_batch_shape: torch.Size,
    iteration_fidelity: Optional[int],
    data_fidelity: Optional[int],
    linear_truncated: bool,
    nu: float,
) -> Tuple[ScaleKernel, Dict]:
    """Helper function to get the covariance module and associated subset_batch_dict
    for the multifidelity setting.

    Args:
        dim: The dimensionality of the training data.
        aug_batch_shape: The output-augmented batch shape as defined in
            `BatchedMultiOutputGPyTorchModel`.
        iteration_fidelity: The column index for the training iteration fidelity
            parameter (optional).
        data_fidelity: The column index for the downsampling fidelity parameter
            (optional).
        linear_truncated: If True, use a `LinearTruncatedFidelityKernel` instead
            of the default kernel.
        nu: The smoothness parameter for the Matern kernel: either 1/2, 3/2, or
            5/2. Only used when `linear_truncated=True`.

    Returns:
        The covariance module and subset_batch_dict.
    """

    # Normalize negative (from-the-end) fidelity column indices to absolute
    # positions, so the active_dims computations below can compare them directly.
    if iteration_fidelity is not None and iteration_fidelity < 0:
        iteration_fidelity = dim + iteration_fidelity
    if data_fidelity is not None and data_fidelity < 0:
        data_fidelity = dim + data_fidelity

    if linear_truncated:
        # A single kernel handles all fidelity columns jointly.
        fidelity_dims = [
            i for i in (iteration_fidelity, data_fidelity) if i is not None
        ]
        kernel = LinearTruncatedFidelityKernel(
            fidelity_dims=fidelity_dims,
            dimension=dim,
            nu=nu,
            batch_shape=aug_batch_shape,
            power_prior=GammaPrior(3.0, 3.0),
        )
    else:
        # RBF over the non-fidelity input dimensions only.
        active_dimsX = [
            i for i in range(dim)
            if i not in {iteration_fidelity, data_fidelity}
        ]
        kernel = RBFKernel(
            ard_num_dims=len(active_dimsX),
            batch_shape=aug_batch_shape,
            lengthscale_prior=GammaPrior(3.0, 6.0),
            active_dims=active_dimsX,
        )
        # Fidelity-specific kernels are appended in a fixed order, which the
        # `kernels.N` names in subset_batch_dict below depend on:
        # kernels.0 = RBF, then exp-decay (if any), then downsampling (if any).
        additional_kernels = []
        if iteration_fidelity is not None:
            exp_kernel = ExponentialDecayKernel(
                batch_shape=aug_batch_shape,
                lengthscale_prior=GammaPrior(3.0, 6.0),
                offset_prior=GammaPrior(3.0, 6.0),
                power_prior=GammaPrior(3.0, 6.0),
                active_dims=[iteration_fidelity],
            )
            additional_kernels.append(exp_kernel)
        if data_fidelity is not None:
            ds_kernel = DownsamplingKernel(
                batch_shape=aug_batch_shape,
                offset_prior=GammaPrior(3.0, 6.0),
                power_prior=GammaPrior(3.0, 6.0),
                active_dims=[data_fidelity],
            )
            additional_kernels.append(ds_kernel)
        kernel = ProductKernel(kernel, *additional_kernels)

    covar_module = ScaleKernel(kernel,
                               batch_shape=aug_batch_shape,
                               outputscale_prior=GammaPrior(2.0, 0.15))

    # Map raw-parameter names to the (negative) batch dimension along which
    # `BatchedMultiOutputGPyTorchModel` subsets them.
    if linear_truncated:
        subset_batch_dict = {
            "covar_module.base_kernel.raw_power": -2,
            "covar_module.base_kernel.covar_module_unbiased.raw_lengthscale":
            -3,
            "covar_module.base_kernel.covar_module_biased.raw_lengthscale": -3,
        }
    else:
        # Base case: kernels.1 is whichever fidelity kernel was appended first.
        # NOTE(review): if both fidelities are None this dict still references
        # kernels.1, which would not exist in the ProductKernel — presumably
        # callers guarantee at least one fidelity column; verify upstream.
        subset_batch_dict = {
            "covar_module.base_kernel.kernels.0.raw_lengthscale": -3,
            "covar_module.base_kernel.kernels.1.raw_power": -2,
            "covar_module.base_kernel.kernels.1.raw_offset": -2,
        }
        if iteration_fidelity is not None:
            # kernels.1 is the exp-decay kernel, which also has a lengthscale.
            subset_batch_dict = {
                "covar_module.base_kernel.kernels.1.raw_lengthscale": -3,
                **subset_batch_dict,
            }
            if data_fidelity is not None:
                # Both fidelities present: downsampling kernel shifts to kernels.2.
                subset_batch_dict = {
                    "covar_module.base_kernel.kernels.2.raw_power": -2,
                    "covar_module.base_kernel.kernels.2.raw_offset": -2,
                    **subset_batch_dict,
                }

    return covar_module, subset_batch_dict
Exemplo n.º 7
0
 def test_initialize_offset_prior(self):
     """Offset prior is settable both after construction and via the constructor."""
     k1 = DownsamplingKernel()
     k1.offset_prior = NormalPrior(1, 1)
     self.assertTrue(isinstance(k1.offset_prior, NormalPrior))
     k2 = DownsamplingKernel(offset_prior=GammaPrior(1, 1))
     self.assertTrue(isinstance(k2.offset_prior, GammaPrior))
Exemplo n.º 8
0
 def test_initialize_power_batch(self):
     """A per-batch power tensor passed to initialize() is stored verbatim."""
     kernel = DownsamplingKernel(batch_shape=torch.Size([2]))
     init = torch.tensor([1.0, 2.0])
     kernel.initialize(power=init)
     expected = init.view_as(kernel.power)
     self.assertLess(torch.norm(kernel.power - expected), 1e-5)
Exemplo n.º 9
0
 def create_kernel_no_ard(self, **kwargs):
     """Factory for a DownsamplingKernel (no ARD); forwards kwargs unchanged."""
     kernel = DownsamplingKernel(**kwargs)
     return kernel
Exemplo n.º 10
0
 def test_initialize_power(self):
     """A scalar power passed to initialize() is stored as the value 1.0."""
     kernel = DownsamplingKernel()
     kernel.initialize(power=1)
     expected = torch.tensor(1.0).view_as(kernel.power)
     self.assertLess(torch.norm(kernel.power - expected), 1e-5)
Exemplo n.º 11
0
 def test_initialize_offset_batch(self):
     """A per-batch offset tensor passed to initialize() is stored verbatim."""
     kernel = DownsamplingKernel(batch_shape=torch.Size([2]))
     init = torch.tensor([1.0, 2.0])
     kernel.initialize(offset=init)
     expected = init.view_as(kernel.offset)
     self.assertLess(torch.norm(kernel.offset - expected), 1e-5)
Exemplo n.º 12
0
 def test_initialize_offset(self):
     """A scalar offset passed to initialize() is stored as the value 1.0."""
     kernel = DownsamplingKernel()
     kernel.initialize(offset=1)
     expected = torch.tensor(1.0).view_as(kernel.offset)
     self.assertLess(torch.norm(kernel.offset - expected), 1e-5)