Example #1
    def test_InverseCostWeightedUtility(self):
        for batch_shape in ([], [2]):
            for dtype in (torch.float, torch.double):
                # the event shape is `batch_shape x q x m`
                mean = 1 + torch.rand(
                    *batch_shape, 2, 1, device=self.device, dtype=dtype)
                mm = MockModel(MockPosterior(mean=mean))

                X = torch.randn(
                    *batch_shape, 3, 2, device=self.device, dtype=dtype)
                deltas = torch.rand(
                    4, *batch_shape, device=self.device, dtype=dtype)

                # test that sampler is required if use_mean=False
                icwu = InverseCostWeightedUtility(mm, use_mean=False)
                with self.assertRaises(RuntimeError):
                    icwu(X, deltas)

                # check warning for negative cost
                mm = MockModel(MockPosterior(mean=mean.clamp_max(-1e-6)))
                icwu = InverseCostWeightedUtility(mm)
                with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                    icwu(X, deltas)
                    self.assertTrue(
                        any(issubclass(w.category, CostAwareWarning) for w in ws)
                    )

                # basic test
                mm = MockModel(MockPosterior(mean=mean))
                icwu = InverseCostWeightedUtility(mm)
                ratios = icwu(X, deltas)
                self.assertTrue(
                    torch.equal(ratios, deltas / mean.squeeze(-1).sum(dim=-1)))

                # sampling test
                # event shape is `batch_shape x q x m`
                samples = 1 + torch.rand(
                    *batch_shape, 3, 1, device=self.device, dtype=dtype)
                mm = MockModel(MockPosterior(samples=samples))
                icwu = InverseCostWeightedUtility(mm, use_mean=False)
                ratios = icwu(X, deltas, sampler=IIDNormalSampler(4))
                self.assertTrue(
                    torch.equal(ratios,
                                deltas / samples.squeeze(-1).sum(dim=-1)))
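
# The two `torch.equal` checks above reduce to dividing each utility
# increment by the summed per-point cost of the q-batch. A minimal sketch
# of that arithmetic in plain torch (shapes mirror the test; illustrative
# only, not part of the original snippet):
import torch

deltas = torch.rand(4, 2)        # sample_shape x batch_shape
cost = 1 + torch.rand(2, 3, 1)   # batch_shape x q x m, strictly positive
ratios = deltas / cost.squeeze(-1).sum(dim=-1)  # broadcasts to 4 x 2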
Example #2
def _instantiate_MES(
    model: Model,
    candidate_set: Tensor,
    num_fantasies: int = 16,
    num_mv_samples: int = 10,
    num_y_samples: int = 128,
    use_gumbel: bool = True,
    X_pending: Optional[Tensor] = None,
    maximize: bool = True,
    num_trace_observations: int = 0,
    target_fidelities: Optional[Dict[int, float]] = None,
    fidelity_weights: Optional[Dict[int, float]] = None,
    cost_intercept: float = 1.0,
) -> qMaxValueEntropy:
    if target_fidelities:
        if fidelity_weights is None:
            fidelity_weights = {f: 1.0 for f in target_fidelities}
        if set(target_fidelities) != set(fidelity_weights):
            raise RuntimeError(
                "Must provide the same indices for target_fidelities "
                f"({set(target_fidelities)}) and fidelity_weights "
                f"({set(fidelity_weights)})."
            )
        cost_model = AffineFidelityCostModel(
            fidelity_weights=fidelity_weights, fixed_cost=cost_intercept
        )
        cost_aware_utility = InverseCostWeightedUtility(cost_model=cost_model)

        def project(X: Tensor) -> Tensor:
            return project_to_target_fidelity(X=X, target_fidelities=target_fidelities)

        def expand(X: Tensor) -> Tensor:
            return expand_trace_observations(
                X=X,
                fidelity_dims=sorted(target_fidelities),  # pyre-ignore: [6]
                num_trace_obs=num_trace_observations,
            )

        return qMultiFidelityMaxValueEntropy(
            model=model,
            candidate_set=candidate_set,
            num_fantasies=num_fantasies,
            num_mv_samples=num_mv_samples,
            num_y_samples=num_y_samples,
            use_gumbel=use_gumbel,
            X_pending=X_pending,
            maximize=maximize,
            cost_aware_utility=cost_aware_utility,
            project=project,
            expand=expand,
        )

    return qMaxValueEntropy(
        model=model,
        candidate_set=candidate_set,
        num_fantasies=num_fantasies,
        num_mv_samples=num_mv_samples,
        num_y_samples=num_y_samples,
        use_gumbel=use_gumbel,
        X_pending=X_pending,
        maximize=maximize,
    )
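
# A hypothetical usage sketch for the helper above. The `SingleTaskGP` fit
# and the 3-dimensional design space are assumptions for illustration, not
# part of the original code:
import torch
from botorch.models import SingleTaskGP

model = SingleTaskGP(torch.rand(8, 3), torch.rand(8, 1))
candidate_set = torch.rand(100, 3)  # n x d discretization of the design space

# With target_fidelities set, the multi-fidelity variant is returned ...
mf_mes = _instantiate_MES(
    model=model, candidate_set=candidate_set, target_fidelities={2: 1.0}
)
# ... and without it, a plain qMaxValueEntropy.
mes = _instantiate_MES(model=model, candidate_set=candidate_set)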
Example #3
    def compute_model_dependencies(
        cls,
        surrogate: Surrogate,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        target_fidelities: Dict[int, float],
        pending_observations: Optional[List[Tensor]] = None,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        options: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        dependencies = super().compute_model_dependencies(
            surrogate=surrogate,
            bounds=bounds,
            objective_weights=objective_weights,
            pending_observations=pending_observations,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
            target_fidelities=target_fidelities,
            options=options,
        )

        options = options or {}

        fidelity_weights = options.get(Keys.FIDELITY_WEIGHTS, None)
        if fidelity_weights is None:
            fidelity_weights = {f: 1.0 for f in target_fidelities}
        if set(target_fidelities) != set(fidelity_weights):
            raise RuntimeError(
                "Must provide the same indices for target_fidelities "
                f"({set(target_fidelities)}) and fidelity_weights "
                f"({set(fidelity_weights)}).")

        cost_intercept = options.get(Keys.COST_INTERCEPT, 1.0)

        cost_model = AffineFidelityCostModel(fidelity_weights=fidelity_weights,
                                             fixed_cost=cost_intercept)
        cost_aware_utility = InverseCostWeightedUtility(cost_model=cost_model)

        def project(X: Tensor) -> Tensor:
            return project_to_target_fidelity(
                X=X, target_fidelities=target_fidelities)

        def expand(X: Tensor) -> Tensor:
            return expand_trace_observations(
                X=X,
                fidelity_dims=sorted(target_fidelities),
                # pyre-fixme[16]: `Optional` has no attribute `get`.
                num_trace_obs=options.get(Keys.NUM_TRACE_OBSERVATIONS, 0),
            )

        dependencies.update({
            Keys.COST_AWARE_UTILITY: cost_aware_utility,
            Keys.PROJECT: project,
            Keys.EXPAND: expand,
        })
        return dependencies
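
# For reference, the `project` and `expand` closures above wrap two botorch
# helpers; a small sketch of their effect (the shapes and fidelity index are
# illustrative assumptions):
import torch
from botorch.acquisition.utils import (
    expand_trace_observations,
    project_to_target_fidelity,
)

X = torch.rand(2, 3)  # q x d, with the fidelity parameter in column 2
X_proj = project_to_target_fidelity(X=X, target_fidelities={2: 1.0})
assert torch.all(X_proj[..., -1] == 1.0)  # fidelity column pinned to target
X_exp = expand_trace_observations(X=X, fidelity_dims=[2], num_trace_obs=1)
assert X_exp.shape == torch.Size([4, 3])  # one trace point per design point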
Example #4
def construct_inputs_mf_base(
    model: Model,
    training_data: TrainingData,
    target_fidelities: Dict[int, Union[int, float]],
    fidelity_weights: Optional[Dict[int, float]] = None,
    cost_intercept: float = 1.0,
    num_trace_observations: int = 0,
    **ignore: Any,
) -> Dict[str, Any]:
    r"""Construct kwargs for a multifidetlity acquisition function's constructor."""
    if fidelity_weights is None:
        fidelity_weights = {f: 1.0 for f in target_fidelities}

    if set(target_fidelities) != set(fidelity_weights):
        raise RuntimeError(
            "Must provide the same indices for target_fidelities "
            f"({set(target_fidelities)}) and fidelity_weights "
            f" ({set(fidelity_weights)})."
        )

    cost_aware_utility = InverseCostWeightedUtility(
        cost_model=AffineFidelityCostModel(
            fidelity_weights=fidelity_weights, fixed_cost=cost_intercept
        )
    )

    return {
        "target_fidelities": target_fidelities,
        "cost_aware_utility": cost_aware_utility,
        "expand": lambda X: expand_trace_observations(
            X=X,
            fidelity_dims=sorted(target_fidelities),
            num_trace_obs=num_trace_observations,
        ),
        "project": lambda X: project_to_target_fidelity(
            X=X, target_fidelities=target_fidelities
        ),
    }
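
# A hypothetical call sketch. Note that `model` and `training_data` are
# accepted but never used in the body above, so placeholders suffice here:
kwargs = construct_inputs_mf_base(
    model=None,           # unused by the helper body; placeholder only
    training_data=None,   # unused by the helper body; placeholder only
    target_fidelities={3: 1.0},
    cost_intercept=5.0,
)
print(sorted(kwargs))
# ['cost_aware_utility', 'expand', 'project', 'target_fidelities']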
Example #5
def _instantiate_KG(
    model: Model,
    objective: AcquisitionObjective,
    qmc: bool = True,
    n_fantasies: int = 64,
    mc_samples: int = 256,
    num_trace_observations: int = 0,
    seed_inner: Optional[int] = None,
    seed_outer: Optional[int] = None,
    X_pending: Optional[Tensor] = None,
    current_value: Optional[Tensor] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
    fidelity_weights: Optional[Dict[int, float]] = None,
    cost_intercept: float = 1.0,
) -> qKnowledgeGradient:
    r"""Instantiate either a `qKnowledgeGradient` or `qMultiFidelityKnowledgeGradient`
    acquisition function depending on whether `target_fidelities` is defined.
    """
    sampler_cls = SobolQMCNormalSampler if qmc else IIDNormalSampler
    fantasy_sampler = sampler_cls(num_samples=n_fantasies, seed=seed_outer)
    if isinstance(objective, MCAcquisitionObjective):
        inner_sampler = sampler_cls(num_samples=mc_samples, seed=seed_inner)
    else:
        inner_sampler = None
    if target_fidelities:
        if fidelity_weights is None:
            fidelity_weights = {f: 1.0 for f in target_fidelities}
        if set(target_fidelities) != set(fidelity_weights):
            raise RuntimeError(
                "Must provide the same indices for target_fidelities "
                f"({set(target_fidelities)}) and fidelity_weights "
                f"({set(fidelity_weights)}).")
        cost_model = AffineFidelityCostModel(fidelity_weights=fidelity_weights,
                                             fixed_cost=cost_intercept)
        cost_aware_utility = InverseCostWeightedUtility(cost_model=cost_model)

        def project(X: Tensor) -> Tensor:
            return project_to_target_fidelity(
                X=X, target_fidelities=target_fidelities)

        def expand(X: Tensor) -> Tensor:
            return expand_trace_observations(
                X=X,
                fidelity_dims=sorted(target_fidelities),  # pyre-ignore: [6]
                num_trace_obs=num_trace_observations,
            )

        return qMultiFidelityKnowledgeGradient(
            model=model,
            num_fantasies=n_fantasies,
            sampler=fantasy_sampler,
            objective=objective,
            inner_sampler=inner_sampler,
            X_pending=X_pending,
            current_value=current_value,
            cost_aware_utility=cost_aware_utility,
            project=project,
            expand=expand,
        )

    return qKnowledgeGradient(
        model=model,
        num_fantasies=n_fantasies,
        sampler=fantasy_sampler,
        objective=objective,
        inner_sampler=inner_sampler,
        X_pending=X_pending,
        current_value=current_value,
    )
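
# A hypothetical usage sketch for `_instantiate_KG`. The model, objective,
# and import path follow the older botorch API that this snippet targets
# and are assumptions for illustration:
import torch
from botorch.acquisition.objective import ScalarizedObjective
from botorch.models import SingleTaskGP

model = SingleTaskGP(torch.rand(8, 3), torch.rand(8, 1))
objective = ScalarizedObjective(weights=torch.tensor([1.0]))
# `target_fidelities` omitted, so a plain qKnowledgeGradient is returned.
kg = _instantiate_KG(model=model, objective=objective, n_fantasies=8)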
Example #6
    def __init__(
        self,
        model: Model,
        candidate_set: Tensor,
        num_fantasies: int = 16,
        num_mv_samples: int = 10,
        num_y_samples: int = 128,
        use_gumbel: bool = True,
        X_pending: Optional[Tensor] = None,
        maximize: bool = True,
        cost_aware_utility: Optional[CostAwareUtility] = None,
        project: Callable[[Tensor], Tensor] = lambda X: X,
        expand: Callable[[Tensor], Tensor] = lambda X: X,
        **kwargs: Any,
    ) -> None:
        r"""Single-outcome max-value entropy search acquisition function.

        Args:
            model: A fitted single-outcome model.
            candidate_set: An `n x d` Tensor of `n` candidate points used to
                discretize the design space; the max values are sampled from
                the model posterior over these points.
            num_fantasies: Number of fantasies to generate. The higher this
                number, the more accurate the model (at the expense of model
                complexity and performance). Only used when `X_pending` is
                not `None`.
            num_mv_samples: Number of max value samples.
            num_y_samples: Number of posterior samples at specific design point `X`.
            use_gumbel: If True, use the Gumbel approximation to sample the
                max values.
            X_pending: A `m x d`-dim Tensor of `m` design points that have been
                submitted for function evaluation but have not yet been evaluated.
            maximize: If True, consider the problem a maximization problem.
            cost_aware_utility: A CostAwareUtility computing the cost-transformed
                utility from a candidate set and samples of increases in utility.
            project: A callable mapping a `batch_shape x q x d` tensor of design
                points to a tensor of the same shape projected to the desired
                target set (e.g. the target fidelities in case of multi-fidelity
                optimization).
            expand: A callable mapping a `batch_shape x q x d` input tensor to
                a `batch_shape x (q + q_e) x d`-dim output tensor, where the
                `q_e` additional points in each q-batch correspond to
                additional ("trace") observations.
        """
        super().__init__(
            model=model,
            candidate_set=candidate_set,
            num_fantasies=num_fantasies,
            num_mv_samples=num_mv_samples,
            num_y_samples=num_y_samples,
            X_pending=X_pending,
            use_gumbel=use_gumbel,
            maximize=maximize,
        )

        if cost_aware_utility is None:
            cost_model = AffineFidelityCostModel(fidelity_weights={-1: 1.0})
            cost_aware_utility = InverseCostWeightedUtility(cost_model=cost_model)

        self.cost_aware_utility = cost_aware_utility
        self.expand = expand
        self.project = project
        self._cost_sampler = None

        # TODO: make sure fidelity_dims align in project, expand & cost_aware_utility.
        # This seems very difficult given the current way project/expand are handled.

        # resample max values after initializing self.project
        # so that the max value samples are at the highest fidelity
        self._sample_max_values()
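
# When no `cost_aware_utility` is supplied, the constructor above falls back
# to an affine cost over the last input column. A small sketch of what that
# default charges (the 0.01 default for `fixed_cost` is an assumption about
# the botorch version this snippet targets):
import torch
from botorch.models.cost import AffineFidelityCostModel

cost_model = AffineFidelityCostModel(fidelity_weights={-1: 1.0})
X = torch.tensor([[0.25, 0.5], [0.25, 1.0]])  # q x d, fidelity in last column
print(cost_model(X).squeeze(-1))  # fixed_cost + X[..., -1]: ~[0.51, 1.01]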