def test_project_to_target_fidelity(self):
    """Test project_to_target_fidelity across batch shapes and dtypes.

    Covers the default target (last column -> 1.0), a single custom
    target fidelity, multiple target fidelities, and gradient flow
    (fidelity columns receive zero gradient).
    """
    for batch_shape, dtype in itertools.product(
        ([], [2]), (torch.float, torch.double)
    ):
        tkwargs = {"device": self.device, "dtype": dtype}
        X = torch.rand(*batch_shape, 3, 4, **tkwargs)
        expected_ones = torch.ones(*X.shape[:-1], 1, **tkwargs)
        # Default behavior: last column projected to 1.0, rest untouched.
        X_proj = project_to_target_fidelity(X)
        self.assertTrue(torch.equal(X_proj[..., :, [-1]], expected_ones))
        self.assertTrue(torch.equal(X_proj[..., :-1], X[..., :-1]))
        # A single custom target fidelity.
        X_proj = project_to_target_fidelity(X, target_fidelities={2: 0.5})
        self.assertTrue(torch.equal(X_proj[..., :, [2]], 0.5 * expected_ones))
        # Multiple target fidelities at once.
        fids = {2: 0.5, 0: 0.1}
        X_proj = project_to_target_fidelity(X, target_fidelities=fids)
        self.assertTrue(torch.equal(X_proj[..., :, [0]], 0.1 * expected_ones))
        self.assertTrue(torch.equal(X_proj[..., :, [2]], 0.5 * expected_ones))
        # Gradients: projected (fidelity) columns get zero gradient,
        # the remaining columns get the gradient of sum(X_proj ** 2).
        X.requires_grad_(True)
        X_proj = project_to_target_fidelity(X, target_fidelities=fids)
        (X_proj**2).sum().backward()
        self.assertTrue(torch.all(X.grad[..., [0, 2]] == 0))
        self.assertTrue(
            torch.equal(X.grad[..., [1, 3]], 2 * X[..., [1, 3]])
        )
def construct_inputs_mf_base(
    model: Model,
    training_data: TrainingData,
    target_fidelities: Dict[int, Union[int, float]],
    fidelity_weights: Optional[Dict[int, float]] = None,
    cost_intercept: float = 1.0,
    num_trace_observations: int = 0,
    **ignore: Any,
) -> Dict[str, Any]:
    r"""Construct kwargs for a multifidelity acquisition function's constructor.

    Args:
        model: The model for the acquisition function (not used here; accepted
            for a uniform constructor-input interface).
        training_data: The training data (not used here; accepted for a uniform
            constructor-input interface).
        target_fidelities: Mapping from fidelity column index to the target
            fidelity value to project onto.
        fidelity_weights: Mapping from fidelity column index to its cost weight.
            Defaults to a weight of 1.0 for every index in `target_fidelities`.
        cost_intercept: The fixed cost of the affine fidelity cost model.
        num_trace_observations: The number of trace observations to expand to.
        **ignore: Additional kwargs, ignored.

    Returns:
        A dict with keys `target_fidelities`, `cost_aware_utility`, `expand`,
        and `project`, suitable for passing to a multi-fidelity acquisition
        function constructor.

    Raises:
        RuntimeError: If `fidelity_weights` does not cover the same set of
            indices as `target_fidelities`.
    """
    if fidelity_weights is None:
        # Default: unit cost weight for each target fidelity dimension.
        fidelity_weights = {f: 1.0 for f in target_fidelities}
    if set(target_fidelities) != set(fidelity_weights):
        raise RuntimeError(
            "Must provide the same indices for target_fidelities "
            f"({set(target_fidelities)}) and fidelity_weights "
            f" ({set(fidelity_weights)})."
        )
    # Inverse-cost weighting over an affine (weights + fixed intercept) model.
    cost_aware_utility = InverseCostWeightedUtility(
        cost_model=AffineFidelityCostModel(
            fidelity_weights=fidelity_weights, fixed_cost=cost_intercept
        )
    )
    return {
        "target_fidelities": target_fidelities,
        "cost_aware_utility": cost_aware_utility,
        "expand": lambda X: expand_trace_observations(
            X=X,
            fidelity_dims=sorted(target_fidelities),
            num_trace_obs=num_trace_observations,
        ),
        "project": lambda X: project_to_target_fidelity(
            X=X, target_fidelities=target_fidelities
        ),
    }
def project(X: Tensor) -> Tensor:
    """Project `X` onto the target fidelities (closure over `target_fidelities`)."""
    return project_to_target_fidelity(X=X, target_fidelities=target_fidelities)
def test_construct_inputs_mf_base(self):
    """Test construct_inputs_mf_base: fully specified kwargs, default
    fidelity weights, and mismatched weight/target indices."""
    target_fidelities = {0: 0.123}
    fidelity_weights = {0: 0.456}
    cost_intercept = 0.789
    num_trace_observations = 0
    with self.subTest("test_fully_specified"):
        kwargs = construct_inputs_mf_base(
            model=mock.Mock(),
            training_data=self.bd_td,
            objective=LinearMCObjective(torch.rand(2)),
            target_fidelities=target_fidelities,
            fidelity_weights=fidelity_weights,
            cost_intercept=cost_intercept,
            num_trace_observations=num_trace_observations,
        )
        self.assertEqual(kwargs["target_fidelities"], target_fidelities)
        X = torch.rand(3, 2)
        # "expand" must match expand_trace_observations on the same inputs.
        self.assertIsInstance(kwargs["expand"], Callable)
        expected_expand = expand_trace_observations(
            X=X,
            fidelity_dims=sorted(target_fidelities),
            num_trace_obs=num_trace_observations,
        )
        self.assertTrue(torch.equal(kwargs["expand"](X), expected_expand))
        # "project" must match project_to_target_fidelity.
        self.assertIsInstance(kwargs["project"], Callable)
        expected_project = project_to_target_fidelity(
            X, target_fidelities=target_fidelities
        )
        self.assertTrue(torch.equal(kwargs["project"](X), expected_project))
        # Cost model carries the provided intercept and weights.
        cost_model = kwargs["cost_aware_utility"].cost_model
        expected_weights = torch.tensor(
            list(fidelity_weights.values()), dtype=cost_model.weights.dtype
        )
        self.assertEqual(cost_model.fixed_cost, cost_intercept)
        self.assertTrue(torch.allclose(cost_model.weights, expected_weights))
    with self.subTest("test_missing_fidelity_weights"):
        # Omitted fidelity_weights default to 1.0 per target index.
        kwargs = construct_inputs_mf_base(
            model=mock.Mock(),
            training_data=self.bd_td,
            objective=LinearMCObjective(torch.rand(2)),
            target_fidelities=target_fidelities,
            cost_intercept=cost_intercept,
        )
        cost_model = kwargs["cost_aware_utility"].cost_model
        self.assertTrue(
            torch.allclose(cost_model.weights, torch.ones_like(cost_model.weights))
        )
    with self.subTest("test_mismatched_weights"):
        # Disjoint weight/target indices must raise.
        with self.assertRaisesRegex(
            RuntimeError, "Must provide the same indices for"
        ):
            construct_inputs_mf_base(
                model=mock.Mock(),
                training_data=self.bd_td,
                objective=LinearMCObjective(torch.rand(2)),
                target_fidelities={0: 1.0},
                fidelity_weights={1: 0.5},
                cost_intercept=cost_intercept,
            )