Code Example #1
 def test_expand_trace_observations(self):
     for batch_shape, dtype in itertools.product(
         ([], [2]), (torch.float, torch.double)
     ):
         q, d = 3, 4
         X = torch.rand(*batch_shape, q, d, device=self.device, dtype=dtype)
         # test nullop behavior
         self.assertTrue(torch.equal(expand_trace_observations(X), X))
         self.assertTrue(
             torch.equal(expand_trace_observations(X, fidelity_dims=[1]), X)
         )
         # test default behavior
         num_tr = 2
         X_expanded = expand_trace_observations(X, num_trace_obs=num_tr)
         self.assertEqual(
             X_expanded.shape, torch.Size(batch_shape + [q * (1 + num_tr), d])
         )
         for i in range(1 + num_tr):
             X_sub = X_expanded[..., q * i : q * (i + 1), :]
             self.assertTrue(torch.equal(X_sub[..., :-1], X[..., :-1]))
             X_sub_expected = (1 - i / (num_tr + 1)) * X[..., :q, -1]
             self.assertTrue(torch.equal(X_sub[..., -1], X_sub_expected))
         # test custom fidelity dims
         fdims = [0, 2]
         num_tr = 3
         X_expanded = expand_trace_observations(
             X, fidelity_dims=fdims, num_trace_obs=num_tr
         )
         self.assertEqual(
             X_expanded.shape, torch.Size(batch_shape + [q * (1 + num_tr), d])
         )
         for j, i in itertools.product([1, 3], range(1 + num_tr)):
             X_sub = X_expanded[..., q * i : q * (i + 1), j]
             self.assertTrue(torch.equal(X_sub, X[..., j]))
         for j, i in itertools.product(fdims, range(1 + num_tr)):
             X_sub = X_expanded[..., q * i : q * (i + 1), j]
             X_sub_expected = (1 - i / (1 + num_tr)) * X[..., :q, j]
             self.assertTrue(torch.equal(X_sub, X_sub_expected))
         # test gradients
         num_tr = 2
         fdims = [1]
         X.requires_grad_(True)
         X_expanded = expand_trace_observations(
             X, fidelity_dims=fdims, num_trace_obs=num_tr
         )
         out = X_expanded.sum()
         out.backward()
         grad_exp = torch.full_like(X, 1 + num_tr)
         grad_exp[..., fdims] = 1 + sum(
             (i + 1) / (num_tr + 1) for i in range(num_tr)
         )
         self.assertTrue(torch.allclose(X.grad, grad_exp))
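For orientation, a minimal sketch (not part of the original test) of the behavior the test above exercises, assuming BoTorch's standard import path: expand_trace_observations stacks 1 + num_trace_obs copies of the q-batch and scales the fidelity column of each trace block by an evenly spaced factor 1 - i / (num_trace_obs + 1), leaving all other columns untouched.

    import torch
    from botorch.acquisition.utils import expand_trace_observations

    # One point (q=1, d=2) whose last column is the fidelity parameter.
    X = torch.tensor([[0.5, 1.0]])
    X_exp = expand_trace_observations(X, num_trace_obs=2)
    # Block 0 is X itself; blocks 1 and 2 scale the fidelity column by
    # 2/3 and 1/3 respectively:
    print(X_exp)
    # tensor([[0.5000, 1.0000],
    #         [0.5000, 0.6667],
    #         [0.5000, 0.3333]])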
Code Example #2
File: multi_fidelity.py Project: facebook/Ax
 def expand(X: Tensor) -> Tensor:
     return expand_trace_observations(
         X=X,
         fidelity_dims=sorted(target_fidelities),
         # pyre-fixme[16]: `Optional` has no attribute `get`.
         num_trace_obs=options.get(Keys.NUM_TRACE_OBSERVATIONS, 0),
     )
Code Example #3
from typing import Any, Dict, Optional, Union

from botorch.acquisition.cost_aware import InverseCostWeightedUtility
from botorch.acquisition.utils import (
    expand_trace_observations,
    project_to_target_fidelity,
)
from botorch.models.cost import AffineFidelityCostModel
from botorch.models.model import Model
from botorch.utils.containers import TrainingData


def construct_inputs_mf_base(
    model: Model,
    training_data: TrainingData,
    target_fidelities: Dict[int, Union[int, float]],
    fidelity_weights: Optional[Dict[int, float]] = None,
    cost_intercept: float = 1.0,
    num_trace_observations: int = 0,
    **ignore: Any,
) -> Dict[str, Any]:
    r"""Construct kwargs for a multifidetlity acquisition function's constructor."""
    if fidelity_weights is None:
        fidelity_weights = {f: 1.0 for f in target_fidelities}

    if set(target_fidelities) != set(fidelity_weights):
        raise RuntimeError(
            "Must provide the same indices for target_fidelities "
            f"({set(target_fidelities)}) and fidelity_weights "
            f" ({set(fidelity_weights)})."
        )

    cost_aware_utility = InverseCostWeightedUtility(
        cost_model=AffineFidelityCostModel(
            fidelity_weights=fidelity_weights, fixed_cost=cost_intercept
        )
    )

    return {
        "target_fidelities": target_fidelities,
        "cost_aware_utility": cost_aware_utility,
        "expand": lambda X: expand_trace_observations(
            X=X,
            fidelity_dims=sorted(target_fidelities),
            num_trace_obs=num_trace_observations,
        ),
        "project": lambda X: project_to_target_fidelity(
            X=X, target_fidelities=target_fidelities
        ),
    }
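A hedged usage sketch (not from the source): the returned dict is meant to feed the constructor of a multi-fidelity acquisition function. qMultiFidelityKnowledgeGradient accepts the cost_aware_utility, project, and expand entries directly; target_fidelities is already baked into the two closures. The model and training_data objects are assumed to exist in the surrounding code.

    from botorch.acquisition.knowledge_gradient import qMultiFidelityKnowledgeGradient

    # `model` is a fitted BoTorch Model and `training_data` a TrainingData
    # instance (both assumed available here).
    kwargs = construct_inputs_mf_base(
        model=model,
        training_data=training_data,
        target_fidelities={2: 1.0},  # column 2 is the fidelity parameter
        cost_intercept=5.0,
    )
    acqf = qMultiFidelityKnowledgeGradient(
        model=model,
        num_fantasies=64,
        cost_aware_utility=kwargs["cost_aware_utility"],
        project=kwargs["project"],
        expand=kwargs["expand"],
    )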
Code Example #4
 def expand(X: Tensor) -> Tensor:
     return expand_trace_observations(
         X=X,
         fidelity_dims=sorted(target_fidelities),  # pyre-ignore: [6]
         num_trace_obs=num_trace_observations,
     )
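A small sanity sketch for examples #2 and #4, under assumed imports: when num_trace_obs is 0, which is the default at both call sites, expand_trace_observations leaves X unchanged, so these closures are effectively the identity until trace observations are requested.

    import torch
    from botorch.acquisition.utils import expand_trace_observations

    X = torch.rand(5, 3)
    # With num_trace_obs=0 the expansion is a no-op.
    assert torch.equal(
        expand_trace_observations(X, fidelity_dims=[2], num_trace_obs=0), X
    )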
Code Example #5
    def test_construct_inputs_mf_base(self):
        target_fidelities = {0: 0.123}
        fidelity_weights = {0: 0.456}
        cost_intercept = 0.789
        num_trace_observations = 0

        with self.subTest("test_fully_specified"):
            kwargs = construct_inputs_mf_base(
                model=mock.Mock(),
                training_data=self.bd_td,
                objective=LinearMCObjective(torch.rand(2)),
                target_fidelities=target_fidelities,
                fidelity_weights=fidelity_weights,
                cost_intercept=cost_intercept,
                num_trace_observations=num_trace_observations,
            )

            self.assertEqual(kwargs["target_fidelities"], target_fidelities)

            X = torch.rand(3, 2)
            self.assertTrue(isinstance(kwargs["expand"], Callable))
            self.assertTrue(
                torch.equal(
                    kwargs["expand"](X),
                    expand_trace_observations(
                        X=X,
                        fidelity_dims=sorted(target_fidelities),
                        num_trace_obs=num_trace_observations,
                    ),
                ))

            self.assertTrue(isinstance(kwargs["project"], Callable))
            self.assertTrue(
                torch.equal(
                    kwargs["project"](X),
                    project_to_target_fidelity(
                        X, target_fidelities=target_fidelities),
                ))

            cm = kwargs["cost_aware_utility"].cost_model
            w = torch.tensor(list(fidelity_weights.values()),
                             dtype=cm.weights.dtype)
            self.assertEqual(cm.fixed_cost, cost_intercept)
            self.assertTrue(torch.allclose(cm.weights, w))

        with self.subTest("test_missing_fidelity_weights"):
            kwargs = construct_inputs_mf_base(
                model=mock.Mock(),
                training_data=self.bd_td,
                objective=LinearMCObjective(torch.rand(2)),
                target_fidelities=target_fidelities,
                cost_intercept=cost_intercept,
            )
            cm = kwargs["cost_aware_utility"].cost_model
            self.assertTrue(
                torch.allclose(cm.weights, torch.ones_like(cm.weights)))

        with self.subTest("test_mismatched_weights"):
            with self.assertRaisesRegex(RuntimeError,
                                        "Must provide the same indices for"):
                _ = construct_inputs_mf_base(
                    model=mock.Mock(),
                    training_data=self.bd_td,
                    objective=LinearMCObjective(torch.rand(2)),
                    target_fidelities={0: 1.0},
                    fidelity_weights={1: 0.5},
                    cost_intercept=cost_intercept,
                )
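Finally, a minimal sketch (not part of the test) of the project_to_target_fidelity behavior that the "project" assertions above compare against, assuming BoTorch's standard import path: the listed fidelity columns are overwritten with their target values and all other columns pass through unchanged.

    import torch
    from botorch.acquisition.utils import project_to_target_fidelity

    X = torch.rand(3, 2)
    X_proj = project_to_target_fidelity(X, target_fidelities={0: 0.123})
    assert torch.all(X_proj[..., 0] == 0.123)      # fidelity column pinned to target
    assert torch.equal(X_proj[..., 1], X[..., 1])  # other columns unchanged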