    def test_time_limits(self):
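        # Check that the time limits are respected: generation with
        # max_gen_time=0.1 should take less wall-clock time than generation
        # with no limit on the same fitted model.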
        seed = 1
        torch.manual_seed(seed)
        np.random.seed(seed)

        X, y = make_classification(
            n_samples=100,
            n_features=8,
            n_redundant=3,
            n_informative=5,
            random_state=1,
            n_clusters_per_class=4,
        )
        X, y = torch.Tensor(X), torch.Tensor(y)

        model = GPClassificationModel(
            lb=-3 * torch.ones(8),
            ub=3 * torch.ones(8),
            max_fit_time=0.5,
            inducing_size=10,
        )

        model.fit(X, y)
        generator = OptimizeAcqfGenerator(
            acqf=MCLevelSetEstimation,
            acqf_kwargs={"beta": 1.96, "target": 0.5},
        )

        start = time.time()
        generator.gen(1, model)
        end = time.time()
        long = end - start
        generator = OptimizeAcqfGenerator(
            acqf=MCLevelSetEstimation,
            acqf_kwargs={
                "beta": 1.96,
                "target": 0.5
            },
            max_gen_time=0.1,
        )

        start = time.time()
        generator.gen(1, model)
        end = time.time()
        short = end - start

        # very loose test because the time limit is only approximately enforced
        self.assertTrue(long > short)
Example #2
    def test_1d_query(self):
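        # Fit a 1-d sine psychometric function and check that the strategy's
        # query helpers (get_max, get_min, predict, inv_query) recover the
        # known extrema at x = 2 (f = 1) and x = -2 (f = -1).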
        seed = 1
        torch.manual_seed(seed)
        np.random.seed(seed)
        n_init = 150
        n_opt = 1
        lb = -4.0
        ub = 4.0

        target = 0.5

        def obj(x):
            return -((Normal(0, 1).cdf(x[..., 0]) - target) ** 2)

        # Test function: sine with period 8 (one full cycle on [-4, 4])
        def test_fun(x):
            return np.sin(np.pi * x / 4)

        strat_list = [
            Strategy(
                lb=lb,
                ub=ub,
                n_trials=n_init,
                generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
            ),
            Strategy(
                lb=lb,
                ub=ub,
                model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
                generator=OptimizeAcqfGenerator(
                    qUpperConfidenceBound,
                    acqf_kwargs={"beta": 1.96, "objective": GenericMCObjective(obj)},
                ),
                n_trials=n_opt,
            ),
        ]

        strat = SequentialStrategy(strat_list)

        for _i in range(n_init + n_opt):
            next_x = strat.gen()
            strat.add_data(next_x, [bernoulli.rvs(norm.cdf(test_fun(next_x)))])

        # We expect the global max f = 1 at x = 2 and the global min f = -1 at x = -2
        fmax, argmax = strat.get_max()
        self.assertTrue(np.abs(fmax - 1) < 0.5)
        self.assertTrue(np.abs(argmax[0] - 2) < 0.5)

        fmin, argmin = strat.get_min()
        self.assertTrue(np.abs(fmin + 1) < 0.5)
        self.assertTrue(np.abs(argmin[0] + 2) < 0.5)

        # Query at x=2 should be f=1
        self.assertTrue(np.abs(strat.predict(torch.tensor([2]))[0] - 1) < 0.5)

        # Inverse query at val 1 should return (1,[2])
        val, loc = strat.inv_query(1.0, constraints={})
        self.assertTrue(np.abs(val - 1) < 0.5)
        self.assertTrue(np.abs(loc[0] - 2) < 0.5)
Example #3
    def test_1d_jnd(self):
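        # The true latent is linear, f(x) = x / 1.5, so the just-noticeable
        # difference (JND) is 1.5 everywhere; check that both the "step" and
        # "taylor" estimators recover it near the target.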
        seed = 1
        torch.manual_seed(seed)
        np.random.seed(seed)
        n_init = 150
        n_opt = 1
        lb = -4.0
        ub = 4.0

        target = 0.5

        def obj(x):
            return -((Normal(0, 1).cdf(x[..., 0]) - target) ** 2)

        strat_list = [
            Strategy(
                lb=lb,
                ub=ub,
                n_trials=n_init,
                generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
            ),
            Strategy(
                lb=lb,
                ub=ub,
                model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
                generator=OptimizeAcqfGenerator(
                    qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
                ),
                n_trials=n_opt,
            ),
        ]

        strat = SequentialStrategy(strat_list)

        for _i in range(n_init + n_opt):
            next_x = strat.gen()
            strat.add_data(next_x, [bernoulli.rvs(norm.cdf(next_x / 1.5))])

        x = torch.linspace(-4, 4, 100)

        zhat, _ = strat.predict(x)

        # we expect the JND near the target to be close to the true
        # JND (1.5), and since this is a linear model this should hold
        # for both definitions of the JND
        jnd_step = strat.get_jnd(grid=x[:, None], method="step")
        est_jnd_step = jnd_step[50]
        # looser test because step-jnd is hurt more by reverting to the mean
        self.assertTrue(np.abs(est_jnd_step - 1.5) < 0.5)

        jnd_taylor = strat.get_jnd(grid=x[:, None], method="taylor")
        est_jnd_taylor = jnd_taylor[50]
        self.assertTrue(np.abs(est_jnd_taylor - 1.5) < 0.25)
Example #4
    def test_1d_single_targeting(self):
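        # True latent is f(x) = x; check that the fitted model places the
        # 0.75 threshold near norm.ppf(0.75).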

        seed = 1
        torch.manual_seed(seed)
        np.random.seed(seed)
        n_init = 50
        n_opt = 1
        lb = -4.0
        ub = 4.0

        target = 0.75

        def obj(x):
            return -((Normal(0, 1).cdf(x[..., 0]) - target) ** 2)

        strat_list = [
            Strategy(
                lb=lb,
                ub=ub,
                n_trials=n_init,
                generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
            ),
            Strategy(
                lb=lb,
                ub=ub,
                model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
                generator=OptimizeAcqfGenerator(
                    qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
                ),
                n_trials=n_opt,
            ),
        ]

        strat = SequentialStrategy(strat_list)

        for _i in range(n_init + n_opt):
            next_x = strat.gen()
            strat.add_data(next_x, [bernoulli.rvs(norm.cdf(next_x))])

        x = torch.linspace(-4, 4, 100)

        zhat, _ = strat.predict(x)

        # since target is 0.75, find the point at which f_est is 0.75
        est_max = x[np.argmin((norm.cdf(zhat.detach().numpy()) - 0.75) ** 2)]
        # since the true latent is f(x) = x, the true threshold is at
        # x = norm.ppf(0.75), where Phi(x) = 0.75
        self.assertTrue(np.abs(est_max - norm.ppf(0.75)) < 0.5)
Example #5
    def test_1d_single_lse(self):
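        # Level-set estimation with MCLevelSetEstimation; the true latent is
        # f(x) = x, so the estimated 0.75 level should sit near norm.ppf(0.75).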

        seed = 1
        torch.manual_seed(seed)
        np.random.seed(seed)
        n_init = 50
        n_opt = 1
        lb = -4.0
        ub = 4.0

        # target is in z space not phi(z) space, maybe that's
        # weird
        extra_acqf_args = {"target": 0.75, "beta": 1.96}

        strat_list = [
            Strategy(
                lb=lb,
                ub=ub,
                n_trials=n_init,
                generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
            ),
            Strategy(
                lb=lb,
                ub=ub,
                model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
                n_trials=n_opt,
                generator=OptimizeAcqfGenerator(
                    MCLevelSetEstimation, acqf_kwargs=extra_acqf_args
                ),
            ),
        ]

        strat = SequentialStrategy(strat_list)

        for _i in range(n_init + n_opt):
            next_x = strat.gen()
            strat.add_data(next_x, [bernoulli.rvs(norm.cdf(next_x))])

        x = torch.linspace(-4, 4, 100)

        zhat, _ = strat.predict(x)
        # since target is 0.75, find the point at which f_est is 0.75
        est_max = x[np.argmin((norm.cdf(zhat.detach().numpy()) - 0.75) ** 2)]
        # since the true latent is f(x) = x, the true threshold is at
        # x = norm.ppf(0.75), where Phi(x) = 0.75
        self.assertTrue(np.abs(est_max - norm.ppf(0.75)) < 0.5)
Example #6
    def test_1d_single_probit_pure_exploration(self):
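        # True latent is f(x) = x; the posterior response probability over a
        # grid should correlate strongly with norm.cdf(x).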

        seed = 1
        torch.manual_seed(seed)
        np.random.seed(seed)
        n_init = 50
        n_opt = 1
        lb = -4.0
        ub = 4.0

        strat_list = [
            Strategy(
                lb=lb,
                ub=ub,
                n_trials=n_init,
                generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
            ),
            Strategy(
                lb=lb,
                ub=ub,
                model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
                generator=OptimizeAcqfGenerator(
                    qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
                ),
                n_trials=n_opt,
            ),
        ]

        strat = SequentialStrategy(strat_list)

        for _i in range(n_init + n_opt):
            next_x = strat.gen()
            strat.add_data(next_x, [bernoulli.rvs(norm.cdf(next_x))])

        x = torch.linspace(-4, 4, 100)

        zhat, _ = strat.predict(x)

        # f(x) = x so we're just looking at corr between cdf(zhat) and cdf(x)
        self.assertTrue(
            pearsonr(norm.cdf(zhat.detach().numpy()).flatten(), norm.cdf(x).flatten())[
                0
            ]
            > 0.95
        )
Example #7
    def test_1d_single_probit(self):
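        # Active sampling with BernoulliMCMutualInformation; the posterior
        # response probability should match the true f_1d with low MSE.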

        seed = 1
        torch.manual_seed(seed)
        np.random.seed(seed)
        n_init = 15
        n_opt = 20
        lb = -4.0
        ub = 4.0
        acqf = BernoulliMCMutualInformation
        extra_acqf_args = {"objective": ProbitObjective()}

        model_list = [
            Strategy(
                lb=lb,
                ub=ub,
                n_trials=n_init,
                generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
            ),
            Strategy(
                lb=lb,
                ub=ub,
                model=GPClassificationModel(lb=lb, ub=ub, dim=1, inducing_size=10),
                generator=OptimizeAcqfGenerator(acqf, extra_acqf_args),
                n_trials=n_opt,
            ),
        ]

        strat = SequentialStrategy(model_list)

        for _i in range(n_init + n_opt):
            next_x = strat.gen()
            strat.add_data(next_x, [bernoulli.rvs(f_1d(next_x))])

        x = torch.linspace(-4, 4, 100)

        zhat, _ = strat.predict(x)

        true = f_1d(x.detach().numpy())
        est = zhat.detach().numpy()

        # close enough!
        self.assertTrue((((norm.cdf(est) - true) ** 2).mean()) < 0.25)
Example #8
    def test_2d_single_probit_pure_exploration(self):
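        # 2-d analogue: posterior response probabilities on a grid should
        # correlate strongly with the true cdf_new_novel_det test function.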
        seed = 1
        torch.manual_seed(seed)
        np.random.seed(seed)
        n_init = 50
        n_opt = 1
        lb = [-1, -1]
        ub = [1, 1]

        strat_list = [
            Strategy(
                lb=lb,
                ub=ub,
                n_trials=n_init,
                generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
            ),
            Strategy(
                lb=lb,
                ub=ub,
                model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
                generator=OptimizeAcqfGenerator(
                    qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
                ),
                n_trials=n_opt,
            ),
        ]

        strat = SequentialStrategy(strat_list)

        for _i in range(n_init + n_opt):
            next_x = strat.gen()
            strat.add_data(next_x, [bernoulli.rvs(cdf_new_novel_det(next_x))])

        xy = np.mgrid[-1:1:30j, -1:1:30j].reshape(2, -1).T
        post_mean, _ = strat.predict(torch.Tensor(xy))
        phi_post_mean = norm.cdf(post_mean.reshape(30, 30).detach().numpy())

        phi_post_true = cdf_new_novel_det(xy)

        self.assertTrue(
            pearsonr(phi_post_mean.flatten(), phi_post_true.flatten())[0] > 0.9
        )
Example #9
    def test_1d_single_probit_new_interface(self):
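        # Drives the loop via strat.finished instead of a fixed trial count;
        # checks that exactly n_init + n_opt trials ran and that the posterior
        # max lands near x = 0.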

        seed = 1
        torch.manual_seed(seed)
        np.random.seed(seed)
        n_init = 50
        n_opt = 1
        lb = -4.0
        ub = 4.0

        model_list = [
            Strategy(
                lb=lb,
                ub=ub,
                n_trials=n_init,
                generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
            ),
            Strategy(
                lb=lb,
                ub=ub,
                model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
                generator=OptimizeAcqfGenerator(
                    qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
                ),
                n_trials=n_opt,
            ),
        ]

        strat = SequentialStrategy(model_list)

        while not strat.finished:
            next_x = strat.gen()
            strat.add_data(next_x, [bernoulli.rvs(f_1d(next_x))])

        self.assertTrue(strat.y.shape[0] == n_init + n_opt)

        x = torch.linspace(-4, 4, 100)

        zhat, _ = strat.predict(x)

        # true max is 0, very loose test
        self.assertTrue(np.abs(x[np.argmax(zhat.detach().numpy())]) < 0.5)
Example #10
    def test_2d_single_probit(self):
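        # 2-d probit model against the f_2d test function; the posterior
        # argmax over a grid should land near the origin.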

        seed = 1
        torch.manual_seed(seed)
        np.random.seed(seed)
        n_init = 150
        n_opt = 1
        lb = [-1, -1]
        ub = [1, 1]

        strat_list = [
            Strategy(
                lb=lb,
                ub=ub,
                n_trials=n_init,
                generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
            ),
            Strategy(
                lb=lb,
                ub=ub,
                model=GPClassificationModel(lb=lb, ub=ub, inducing_size=20),
                generator=OptimizeAcqfGenerator(
                    qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
                ),
                n_trials=n_opt,
            ),
        ]

        strat = SequentialStrategy(strat_list)

        for _i in range(n_init + n_opt):
            next_x = strat.gen()
            strat.add_data(next_x, [bernoulli.rvs(f_2d(next_x[None, :]))])

        xy = np.mgrid[-1:1:30j, -1:1:30j].reshape(2, -1).T
        zhat, _ = strat.predict(torch.Tensor(xy))

        self.assertTrue(np.all(np.abs(xy[np.argmax(zhat.detach().numpy())]) < 0.5))
Example #11
    def test_extra_ask_warns(self):
        # test that when we ask more times than we have models, we warn but keep going
        seed = 1
        torch.manual_seed(seed)
        np.random.seed(seed)
        n_init = 3
        n_opt = 1
        lb = -4.0
        ub = 4.0

        model_list = [
            Strategy(
                lb=lb,
                ub=ub,
                n_trials=n_init,
                generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
            ),
            Strategy(
                lb=lb,
                ub=ub,
                model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
                generator=OptimizeAcqfGenerator(
                    qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
                ),
                n_trials=n_opt,
            ),
        ]

        strat = SequentialStrategy(model_list)

        for _i in range(n_init + n_opt):
            next_x = strat.gen()
            strat.add_data(next_x, [bernoulli.rvs(norm.cdf(f_1d(next_x)))])

        with self.assertWarns(RuntimeWarning):
            strat.gen()