Example #1
    def _convert_configs(engine):
        Session = sessionmaker(bind=engine)
        session = Session()
        results = session.query(DbReplayTable).all()

        for result in results:
            if result.message_contents["type"] == "setup":
                config_str = result.message_contents["message"]["config_str"]
                config = Config(config_str=config_str)
                if config.version == "0.0":
                    config.convert("0.0", "0.1")
                new_str = str(config)

                new_message = {
                    "type": "setup",
                    "message": {
                        "config_str": new_str
                    }
                }
                if "version" in result.message_contents:
                    new_message["version"] = result.message_contents["version"]

                result.message_contents = new_message

        session.commit()
        logger.info("DbReplayTable : updated old configs.")
Example #2
    def test_monotonic_single_probit_config_file(self):
        config_file = "../configs/single_lse_example.ini"
        config_file = os.path.join(os.path.dirname(__file__), config_file)

        config = Config()
        config.update(config_fnames=[config_file])
        strat = SequentialStrategy.from_config(config)

        self.assertTrue(isinstance(strat.strat_list[0], SobolStrategy))
        self.assertTrue(isinstance(strat.strat_list[1], ModelWrapperStrategy))
        self.assertTrue(
            isinstance(strat.strat_list[1].modelbridge,
                       MonotonicSingleProbitModelbridge))
        self.assertTrue(strat.strat_list[1].modelbridge.acqf is MonotonicMCLSE)
        self.assertTrue(strat.strat_list[1].modelbridge.extra_acqf_args == {
            "beta": 3.98,
            "target": 0.75
        })
        self.assertTrue(strat.strat_list[1].modelbridge.samps == 1000)
        self.assertTrue(strat.strat_list[0].n_trials == 10)
        self.assertTrue(strat.strat_list[0].outcome_type == "single_probit")
        self.assertTrue(strat.strat_list[1].n_trials == 20)
        self.assertTrue(
            torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
        self.assertTrue(
            torch.all(
                strat.strat_list[1].modelbridge.lb == torch.Tensor([0, 0])))
        self.assertTrue(
            torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
        self.assertTrue(
            torch.all(
                strat.strat_list[1].modelbridge.ub == torch.Tensor([1, 1])))
Example #3
    def test_song_factory_2d(self):
        conf = {
            "song_mean_covar_factory": {"lb": [0, 1], "ub": [1, 70], "target": 0.75}
        }
        config = Config(config_dict=conf)
        meanfun, covarfun = song_mean_covar_factory(config)
        self.assertTrue(covarfun.kernels[0].base_kernel.ard_num_dims == 1)
        self.assertTrue(covarfun.kernels[1].base_kernel.ard_num_dims == 1)
        self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
        self.assertTrue(isinstance(covarfun, gpytorch.kernels.AdditiveKernel))
        self.assertTrue(isinstance(covarfun.kernels[0], gpytorch.kernels.ScaleKernel))
        self.assertTrue(isinstance(covarfun.kernels[1], gpytorch.kernels.ScaleKernel))
        self.assertTrue(
            isinstance(covarfun.kernels[0].base_kernel, gpytorch.kernels.RBFKernel)
        )
        self.assertTrue(covarfun.kernels[0].base_kernel.active_dims == 0)
        self.assertTrue(
            isinstance(covarfun.kernels[1].base_kernel, gpytorch.kernels.LinearKernel)
        )
        self.assertTrue(covarfun.kernels[1].base_kernel.active_dims == 1)

        # flip the stim dim
        conf = {
            "song_mean_covar_factory": {
                "lb": [0, 1],
                "ub": [1, 70],
                "target": 0.75,
                "stim_dim": 0,
            }
        }
        config = Config(config_dict=conf)
        meanfun, covarfun = song_mean_covar_factory(config)
        self.assertTrue(covarfun.kernels[1].base_kernel.active_dims == 0)
        self.assertTrue(covarfun.kernels[0].base_kernel.active_dims == 1)
Example #4
    def test_missing_config_file(self):
        config_file = "../configs/does_not_exist.ini"
        config_file = os.path.join(os.path.dirname(__file__), config_file)
        with self.assertRaises(FileNotFoundError):
            Config(config_fnames=[config_file])

        with self.assertRaises(FileNotFoundError):
            Config(config_fnames=[])
Example #5
    def test_name_conflict_warns(self):
        class DummyMod:
            pass

        Config.register_object(DummyMod)

        with self.assertWarns(Warning):
            Config.register_object(DummyMod)
Example #6
    @classmethod
    def from_config(cls, config: Config):
        classname = cls.__name__
        subgen_cls = config.getobj(classname,
                                   "subgenerator",
                                   fallback=OptimizeAcqfGenerator)
        subgen = subgen_cls.from_config(config)
        epsilon = config.getfloat(classname, "epsilon", fallback=0.1)
        return cls(subgenerator=subgen, epsilon=epsilon)
Example #7
    @classmethod
    def from_config(cls, config: Config):
        classname = cls.__name__

        lb = config.gettensor(classname, "lb")
        ub = config.gettensor(classname, "ub")
        dim = config.getint(classname, "dim", fallback=None)
        seed = config.getint(classname, "seed", fallback=None)

        return cls(lb=lb, ub=ub, dim=dim, seed=seed)
Example #8
    def test_conversion(self):
        config_str = """
        [common]
        parnames = [par1, par2]
        lb = [0, 0]
        ub = [1, 1]
        outcome_type = single_probit
        target = 0.75

        [SobolStrategy]
        n_trials = 10

        [ModelWrapperStrategy]
        n_trials = 20
        refit_every = 5

        [experiment]
        acqf = MonotonicMCLSE
        init_strat_cls = SobolStrategy
        opt_strat_cls = ModelWrapperStrategy
        modelbridge_cls = MonotonicSingleProbitModelbridge
        model = MonotonicRejectionGP

        [MonotonicMCLSE]
        beta = 3.98

        [MonotonicRejectionGP]
        inducing_size = 100
        mean_covar_factory = monotonic_mean_covar_factory

        [MonotonicSingleProbitModelbridge]
        restarts = 10
        samps = 1000
        """

        config = Config(config_str=config_str)
        self.assertEqual(config.version, "0.0")
        config.convert("0.0", "0.1")
        self.assertEqual(config.version, "0.1")

        self.assertEqual(config["common"]["strategy_names"],
                         "[init_strat, opt_strat]")
        self.assertEqual(config["common"]["acqf"], "MonotonicMCLSE")

        self.assertEqual(config["init_strat"]["n_trials"], "10")
        self.assertEqual(config["init_strat"]["generator"], "SobolGenerator")

        self.assertEqual(config["opt_strat"]["n_trials"], "20")
        self.assertEqual(config["opt_strat"]["refit_every"], "5")
        self.assertEqual(config["opt_strat"]["generator"],
                         "MonotonicRejectionGenerator")
        self.assertEqual(config["opt_strat"]["model"], "MonotonicRejectionGP")

        self.assertEqual(config["MonotonicRejectionGenerator"]["restarts"],
                         "10")
        self.assertEqual(config["MonotonicRejectionGenerator"]["samps"],
                         "1000")
Example #9
    def test_experiment_deprecation(self):
        config_str = """
            [experiment]
            acqf = PairwiseMCPosteriorVariance
            model = PairwiseProbitModel
            """
        config = Config()
        config.update(config_str=config_str)
        self.assertTrue("acqf" in config["common"])
        self.assertTrue("model" in config["common"])
Example #10
def monotonic_mean_covar_factory(
    config: Config,
) -> Tuple[ConstantMeanPartialObsGrad, gpytorch.kernels.ScaleKernel]:
    """Default factory for monotonic GP models based on derivative observations.

    Args:
        config (Config): Config containing (at least) bounds, and optionally LSE target.

    Returns:
        Tuple[ConstantMeanPartialObsGrad, gpytorch.kernels.ScaleKernel]: Instantiated mean and
            scaled RBF kernels with partial derivative observations.
    """
    lb = config.gettensor("monotonic_mean_covar_factory", "lb")
    ub = config.gettensor("monotonic_mean_covar_factory", "ub")
    assert lb.shape[0] == ub.shape[0], "bounds shape mismatch!"
    dim = lb.shape[0]
    fixed_mean = config.getboolean("monotonic_mean_covar_factory",
                                   "fixed_mean",
                                   fallback=False)

    mean = ConstantMeanPartialObsGrad()

    if fixed_mean:
        try:
            target = config.getfloat("monotonic_mean_covar_factory", "target")
            mean.constant.requires_grad_(False)
            mean.constant.copy_(torch.tensor([norm.ppf(target)]))
        except NoOptionError:
            raise RuntimeError(
                "Config got fixed_mean=True but no target included!")

    ls_prior = gpytorch.priors.GammaPrior(
        concentration=__default_invgamma_concentration,
        rate=__default_invgamma_rate,
        transform=lambda x: 1 / x,
    )
    ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)
    ls_constraint = gpytorch.constraints.Positive(transform=None,
                                                  initial_value=ls_prior_mode)

    covar = gpytorch.kernels.ScaleKernel(
        RBFKernelPartialObsGrad(
            lengthscale_prior=ls_prior,
            lengthscale_constraint=ls_constraint,
            ard_num_dims=dim,
        ),
        outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
    )

    return mean, covar
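For completeness, a minimal sketch of a config exercising the fixed_mean branch above, in the same config_dict style as the factory tests in this section (values are illustrative, not canonical defaults):

    conf = {
        "monotonic_mean_covar_factory": {
            "lb": [0, 0],
            "ub": [1, 1],
            "fixed_mean": True,
            "target": 0.75,
        }
    }
    config = Config(config_dict=conf)
    mean, covar = monotonic_mean_covar_factory(config)
    # Per the function body above, mean.constant is pinned to
    # norm.ppf(0.75) and no longer requires grad.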
Example #11
    def test_greedyepsilon_config(self):
        config_str = """
            [common]
            acqf = MonotonicMCLSE
            [EpsilonGreedyGenerator]
            subgenerator = MonotonicRejectionGenerator
            epsilon = .5
            """
        config = Config()
        config.update(config_str=config_str)
        gen = EpsilonGreedyGenerator.from_config(config)
        self.assertIsInstance(gen.subgenerator, MonotonicRejectionGenerator)
        self.assertEqual(gen.subgenerator.acqf, MonotonicMCLSE)
        self.assertEqual(gen.epsilon, 0.5)
Example #12
    def test_multiple_models_and_strats(self):
        config_str = """
        [common]
        lb = [0, 0]
        ub = [1, 1]
        outcome_type = single_probit
        parnames = [par1, par2]
        strategy_names = [init_strat, opt_strat1, opt_strat2]

        [init_strat]
        generator = SobolGenerator
        n_trials = 1

        [opt_strat1]
        generator = OptimizeAcqfGenerator
        n_trials = 1
        model = GPClassificationModel
        acqf = MCLevelSetEstimation

        [opt_strat2]
        generator = MonotonicRejectionGenerator
        n_trials = 1
        model = MonotonicRejectionGP
        acqf = MonotonicMCLSE
        """

        config = Config()
        config.update(config_str=config_str)

        strat = SequentialStrategy.from_config(config)

        self.assertTrue(
            isinstance(strat.strat_list[0].generator, SobolGenerator))
        self.assertTrue(strat.strat_list[0].model is None)

        self.assertTrue(
            isinstance(strat.strat_list[1].generator, OptimizeAcqfGenerator))
        self.assertTrue(
            isinstance(strat.strat_list[1].model, GPClassificationModel))
        self.assertTrue(
            strat.strat_list[1].generator.acqf is MCLevelSetEstimation)

        self.assertTrue(
            isinstance(strat.strat_list[2].generator,
                       MonotonicRejectionGenerator))
        self.assertTrue(
            isinstance(strat.strat_list[2].model, MonotonicRejectionGP))
        self.assertTrue(strat.strat_list[2].generator.acqf is MonotonicMCLSE)
Example #13
    def test_to_string(self):
        in_str = """
            [common]
            lb = [0, 0]
            ub = [1, 1]
            outcome_type = single_probit
            parnames = [par1, par2]
            strategy_names = [init_strat, opt_strat]
            model = GPClassificationModel
            acqf = LevelSetEstimation
            [init_strat]
            generator = SobolGenerator
            n_trials = 10
            [opt_strat]
            generator = OptimizeAcqfGenerator
            n_trials = 20
            [LevelSetEstimation]
            beta = 3.98
            objective = ProbitObjective
            [GPClassificationModel]
            inducing_size = 10
            mean_covar_factory = default_mean_covar_factory
            [OptimizeAcqfGenerator]
            restarts = 10
            samps = 1000""".strip().replace(" ", "")

        config = Config(config_str=in_str)
        out_str = str(config).strip().replace(" ", "")
        self.assertEqual(in_str, out_str)
Example #14
    def test_sobol_config(self):
        config_str = """
                [common]
                lb = [0]
                ub = [1]
                parnames = [par1]

                [SobolGenerator]
                seed=12345
                """
        config = Config()
        config.update(config_str=config_str)
        gen = SobolGenerator.from_config(config)
        npt.assert_equal(gen.lb.numpy(), np.array([0]))
        npt.assert_equal(gen.ub.numpy(), np.array([1]))
        self.assertEqual(gen.seed, 12345)
Example #15
    @classmethod
    def _get_acqf_options(cls, acqf: AcquisitionFunction, config: Config):
        if acqf is not None:
            acqf_name = acqf.__name__
            default_extra_acqf_args = {
                "beta": 3.98,
                "target": 0.75,
                "objective": None,
                "query_set_size": 512,
            }
            extra_acqf_args = {
                k: config.getobj(
                    acqf_name, k, fallback_type=float, fallback=v, warn=False
                )
                for k, v in default_extra_acqf_args.items()
            }
            acqf_args_expected = signature(acqf).parameters.keys()
            extra_acqf_args = {
                k: v for k, v in extra_acqf_args.items() if k in acqf_args_expected
            }
            if (
                "objective" in extra_acqf_args.keys()
                and extra_acqf_args["objective"] is not None
            ):
                extra_acqf_args["objective"] = extra_acqf_args["objective"]()
        else:
            extra_acqf_args = {}

        return extra_acqf_args
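The signature-filtering step above is easy to miss: a default is forwarded only if the acquisition function's constructor accepts it. A self-contained illustration with a hypothetical stand-in function:

    from inspect import signature

    def toy_acqf(model, beta, target):  # hypothetical stand-in acqf
        pass

    defaults = {"beta": 3.98, "target": 0.75,
                "objective": None, "query_set_size": 512}
    expected = signature(toy_acqf).parameters.keys()
    # Only kwargs that toy_acqf actually accepts survive the filter.
    filtered = {k: v for k, v in defaults.items() if k in expected}
    assert filtered == {"beta": 3.98, "target": 0.75}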
Example #16
    def test_default_factory_2d(self):
        conf = {"default_mean_covar_factory": {"lb": [-2, 3], "ub": [1, 10]}}
        config = Config(config_dict=conf)
        meanfun, covarfun = default_mean_covar_factory(config)
        self.assertTrue(covarfun.base_kernel.ard_num_dims == 2)
        self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
        self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
        self.assertTrue(isinstance(covarfun.base_kernel, gpytorch.kernels.RBFKernel))
Example #17
    @classmethod
    def from_config(cls, config: Config) -> MonotonicRejectionGP:
        classname = cls.__name__
        num_induc = config.gettensor(classname, "num_induc", fallback=25)
        num_samples = config.gettensor(classname, "num_samples", fallback=250)
        num_rejection_samples = config.getint(classname,
                                              "num_rejection_samples",
                                              fallback=5000)

        lb = config.gettensor(classname, "lb")
        ub = config.gettensor(classname, "ub")
        dim = config.getint(classname, "dim", fallback=None)

        mean_covar_factory = config.getobj(
            classname,
            "mean_covar_factory",
            fallback=monotonic_mean_covar_factory)

        mean, covar = mean_covar_factory(config)

        monotonic_idxs: List[int] = config.getlist(classname,
                                                   "monotonic_idxs",
                                                   fallback=[-1])

        return cls(
            monotonic_idxs=monotonic_idxs,
            lb=lb,
            ub=ub,
            dim=dim,
            num_induc=num_induc,
            num_samples=num_samples,
            num_rejection_samples=num_rejection_samples,
            mean_module=mean,
            covar_module=covar,
        )
Example #18
    def test_monotonic_factory_1d(self):
        conf = {"monotonic_mean_covar_factory": {"lb": [0], "ub": [1]}}
        config = Config(config_dict=conf)
        meanfun, covarfun = monotonic_mean_covar_factory(config)
        self.assertTrue(covarfun.base_kernel.ard_num_dims == 1)
        self.assertTrue(isinstance(meanfun, ConstantMeanPartialObsGrad))
        self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
        self.assertTrue(isinstance(covarfun.base_kernel, RBFKernelPartialObsGrad))
        self.assertTrue(meanfun.constant.requires_grad)
Example #19
    def test_sobol_n_trials(self):
        for n_trials in [-1, 0, 1]:
            config_str = f"""
            [common]
            lb = [0]
            ub = [1]
            parnames = [par1]
            strategy_names = [init_strat]

            [init_strat]
            generator = SobolGenerator
            n_trials = {n_trials}
            """
            config = Config()
            config.update(config_str=config_str)
            strat = Strategy.from_config(config, "init_strat")
            self.assertEqual(strat.n_trials, n_trials)
            self.assertEqual(strat.finished, n_trials <= 0)
Example #20
    def make_strat_and_flatconfig(
        self, config_dict: Mapping[str, str]
    ) -> Tuple[SequentialStrategy, Dict[str, str]]:
        """From a config dict, generate a strategy (for running) and
            flattened config (for logging)

        Args:
            config_dict (Mapping[str, str]): A run configuration dictionary.

        Returns:
            Tuple[SequentialStrategy, Dict[str,str]]: A tuple containing a strategy
                object and a flat config.
        """
        config = Config()
        config.update(config_dict=config_dict)
        strat = SequentialStrategy.from_config(config)
        flatconfig = self.flatten_config(config)
        return strat, flatconfig
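For reference, a sketch (names and values hypothetical) of the kind of run-configuration dictionary this helper expects, mirroring the INI sections used in the tests in this section:

    config_dict = {
        "common": {
            "lb": "[0]",
            "ub": "[1]",
            "parnames": "[par1]",
            "strategy_names": "[init_strat]",
        },
        "init_strat": {"generator": "SobolGenerator", "n_trials": "2"},
    }
    # strat, flatconfig = benchmark.make_strat_and_flatconfig(config_dict)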
Example #21
    @classmethod
    def from_config(cls, config: Config):
        classname = cls.__name__
        acqf = config.getobj(classname, "acqf", fallback=None)
        extra_acqf_args = cls._get_acqf_options(acqf, config)

        restarts = config.getint(classname, "restarts", fallback=10)
        samps = config.getint(classname, "samps", fallback=1000)

        max_gen_time = config.getfloat(classname,
                                       "max_gen_time",
                                       fallback=None)

        return cls(
            acqf=acqf,
            acqf_kwargs=extra_acqf_args,
            restarts=restarts,
            samps=samps,
            max_gen_time=max_gen_time,
        )
Example #22
    @classmethod
    def from_config(cls, config: Config):
        strat_names = config.getlist("common",
                                     "strategy_names",
                                     element_type=str)
        strats = []
        for name in strat_names:
            strat = Strategy.from_config(config, str(name))
            strats.append(strat)

        return cls(strat_list=strats)
Example #23
    def test_song_factory_1d(self):
        conf = {"song_mean_covar_factory": {"lb": [0], "ub": [1]}}
        config = Config(config_dict=conf)
        meanfun, covarfun = song_mean_covar_factory(config)
        self.assertTrue(covarfun.kernels[0].base_kernel.ard_num_dims == 1)
        self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
        self.assertTrue(isinstance(covarfun, gpytorch.kernels.AdditiveKernel))
        self.assertTrue(isinstance(covarfun.kernels[0], gpytorch.kernels.ScaleKernel))
        self.assertTrue(
            isinstance(covarfun.kernels[0].base_kernel, gpytorch.kernels.LinearKernel)
        )
Example #24
    def test_monotonic_single_probit_config_file(self):
        config_file = "../configs/single_lse_example.ini"
        config_file = os.path.join(os.path.dirname(__file__), config_file)

        config = Config()
        config.update(config_fnames=[config_file])
        strat = SequentialStrategy.from_config(config)

        self.assertTrue(
            isinstance(strat.strat_list[0].generator, SobolGenerator))
        self.assertTrue(strat.strat_list[0].model is None)

        self.assertTrue(
            isinstance(strat.strat_list[1].generator,
                       MonotonicRejectionGenerator))
        self.assertTrue(strat.strat_list[1].generator.acqf is MonotonicMCLSE)
        self.assertTrue(
            set(strat.strat_list[1].generator.acqf_kwargs.keys()) ==
            {"beta", "target", "objective"})
        self.assertTrue(
            strat.strat_list[1].generator.acqf_kwargs["target"] == 0.75)
        self.assertTrue(
            strat.strat_list[1].generator.acqf_kwargs["beta"] == 3.98)
        self.assertTrue(
            isinstance(
                strat.strat_list[1].generator.acqf_kwargs["objective"],
                ProbitObjective,
            ))
        self.assertTrue(strat.strat_list[1].generator.
                        model_gen_options["raw_samples"] == 1000)
        self.assertTrue(strat.strat_list[0].n_trials == 10)
        self.assertTrue(strat.strat_list[0].outcome_type == "single_probit")
        self.assertTrue(strat.strat_list[1].n_trials == 20)
        self.assertTrue(
            torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
        self.assertTrue(
            torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
        self.assertTrue(
            torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
        self.assertTrue(
            torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
Example #25
    @classmethod
    def from_config(cls, config: Config):
        classname = cls.__name__
        n_samples = config.getint(classname, "num_samples", fallback=1)
        n_rejection_samples = config.getint(classname,
                                            "num_rejection_samples",
                                            fallback=500)
        num_ts_points = config.getint(classname,
                                      "num_ts_points",
                                      fallback=1000)
        target = config.getfloat(classname, "target", fallback=0.75)
        objective = config.getobj(classname,
                                  "objective",
                                  fallback=ProbitObjective)
        explore_features = config.getlist(classname,
                                          "explore_idxs",
                                          fallback=None)  # type: ignore

        return cls(
            n_samples=n_samples,
            n_rejection_samples=n_rejection_samples,
            num_ts_points=num_ts_points,
            target_value=target,
            objective=objective,
            explore_features=explore_features,
        )
Example #26
    def test_randomgen_config(self):
        lb = [-1, 0]
        ub = [1, 2]
        config_str = f"""
        [common]
        lb = {lb}
        ub = {ub}
        """
        config = Config(config_str=config_str)
        gen = RandomGenerator.from_config(config)
        npt.assert_equal(gen.lb.numpy(), np.array(lb))
        npt.assert_equal(gen.ub.numpy(), np.array(ub))
        self.assertEqual(gen.dim, len(lb))
Example #27
    @classmethod
    def from_config(cls, config: Config) -> GPClassificationModel:
        """Alternate constructor for GPClassification model.

        This is used when we recursively build a full sampling strategy
        from a configuration. TODO: document how this works in some tutorial.

        Args:
            config (Config): A configuration containing keys/values matching this class

        Returns:
            GPClassificationModel: Configured class instance.
        """

        classname = cls.__name__
        inducing_size = config.getint(classname, "inducing_size", fallback=10)

        lb = config.gettensor(classname, "lb")
        ub = config.gettensor(classname, "ub")
        dim = config.getint(classname, "dim", fallback=None)

        mean_covar_factory = config.getobj(classname,
                                           "mean_covar_factory",
                                           fallback=default_mean_covar_factory)

        mean, covar = mean_covar_factory(config)
        max_fit_time = config.getfloat(classname,
                                       "max_fit_time",
                                       fallback=None)

        inducing_point_method = config.get(classname,
                                           "inducing_point_method",
                                           fallback="auto")

        likelihood_cls = config.getobj(classname, "likelihood", fallback=None)

        if likelihood_cls is not None:
            if hasattr(likelihood_cls, "from_config"):
                likelihood = likelihood_cls.from_config(config)
            else:
                likelihood = likelihood_cls()
        else:
            likelihood = None  # fall back to __init__ default

        return cls(
            lb=lb,
            ub=ub,
            dim=dim,
            inducing_size=inducing_size,
            mean_module=mean,
            covar_module=covar,
            max_fit_time=max_fit_time,
            inducing_point_method=inducing_point_method,
            likelihood=likelihood,
        )
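A hedged sketch of a config exercising the options read above; values are illustrative, and the bounds live in [common], relying on the same fallback lookup the config-file tests in this section demonstrate:

    config_str = """
    [common]
    lb = [0, 0]
    ub = [1, 1]

    [GPClassificationModel]
    inducing_size = 50
    mean_covar_factory = default_mean_covar_factory
    inducing_point_method = auto
    """
    config = Config(config_str=config_str)
    model = GPClassificationModel.from_config(config)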
Example #28
    def flatten_config(self, config: Config) -> Dict[str, str]:
        """Flatten a config object for logging.

        Args:
            config (Config): AEPsych config object.

        Returns:
            Dict[str,str]: A flat dictionary (that can be used to build a flat pandas data frame).
        """
        flatconfig = {}
        for s in config.sections():
            flatconfig.update({f"{s}_{k}": v for k, v in config[s].items()})
        return flatconfig
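A small worked illustration of the flattening loop above (the exact key set depends on the Config contents; the comment shows representative entries):

    config = Config(config_str="""
    [common]
    lb = [0]
    ub = [1]
    [init_strat]
    n_trials = 10
    """)
    flatconfig = {}
    for s in config.sections():
        flatconfig.update({f"{s}_{k}": v for k, v in config[s].items()})
    # flatconfig now contains entries such as
    # "common_lb": "[0]", "common_ub": "[1]", "init_strat_n_trials": "10"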
Example #29
    def _configs_require_conversion(engine):
        Base.metadata.create_all(engine)
        Session = sessionmaker(bind=engine)
        session = Session()
        results = session.query(DbReplayTable).all()

        for result in results:
            if result.message_contents["type"] == "setup":
                config_str = result.message_contents["message"]["config_str"]
                config = Config(config_str=config_str)
                if config.version == "0.0":
                    return True  # assume that if any config needs conversion, all of them do

        return False
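A minimal sketch (hypothetical database path) of how this check composes with _convert_configs from Example #1:

    from sqlalchemy import create_engine

    engine = create_engine("sqlite:///replay.db")  # hypothetical path
    if _configs_require_conversion(engine):
        _convert_configs(engine)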
Example #30
    def test_nonmonotonic_optimization_config_file(self):
        config_file = "../configs/nonmonotonic_optimization_example.ini"
        config_file = os.path.join(os.path.dirname(__file__), config_file)

        config = Config()
        config.update(config_fnames=[config_file])
        strat = SequentialStrategy.from_config(config)

        self.assertTrue(
            isinstance(strat.strat_list[0].generator, SobolGenerator))
        self.assertTrue(strat.strat_list[0].model is None)

        self.assertTrue(
            isinstance(strat.strat_list[1].generator, OptimizeAcqfGenerator))
        self.assertTrue(
            strat.strat_list[1].generator.acqf is qNoisyExpectedImprovement)
        self.assertTrue(
            set(strat.strat_list[1].generator.acqf_kwargs.keys()) ==
            {"objective"})
        self.assertTrue(
            isinstance(
                strat.strat_list[1].generator.acqf_kwargs["objective"],
                ProbitObjective,
            ))

        self.assertTrue(strat.strat_list[0].n_trials == 10)
        self.assertTrue(strat.strat_list[0].outcome_type == "single_probit")
        self.assertTrue(strat.strat_list[1].n_trials == 20)
        self.assertTrue(
            torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
        self.assertTrue(
            torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
        self.assertTrue(
            torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
        self.assertTrue(
            torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))