Example #1
    def test_add_bench(self):

        logger = BenchmarkLogger(log_every=2)
        problem = TestProblem(lb=[0], ub=[1])

        bench_1 = Benchmark(
            problem=problem,
            logger=logger,
            configs=self.bench_config,
            global_seed=2,
            n_reps=2,
        )
        bench_2 = Benchmark(
            problem=problem,
            logger=logger,
            configs=self.bench_config,
            global_seed=2,
            n_reps=2,
        )

        bench_combined = bench_1 + bench_2
        three_bench = combine_benchmarks(bench_1, bench_2, bench_1)

        self.assertTrue(len(bench_combined.combinations) == 12)
        self.assertTrue(len(three_bench.combinations) == 18)
        self.assertTrue(len(bench_1.combinations) == 6)
        self.assertTrue(len(bench_2.combinations) == 6)
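The self.bench_config fixture is defined in the test class's setUp and never shown in these examples. A plausible shape, borrowing its keys from Example #5, is sketched below; the values are purely illustrative, and the two list-valued options would yield the six per-benchmark combinations asserted above, assuming Benchmark expands list-valued entries into a Cartesian product.

# Illustrative stand-in for self.bench_config (the real fixture lives in
# setUp); keys mirror Example #5, values are made up for illustration.
# List-valued entries are the options that Benchmark is assumed to expand
# into its .combinations grid.
bench_config = {
    "common": {"outcome_type": "single_probit"},
    "experiment": {
        "acqf": "MonotonicMCLSE",
        "modelbridge_cls": "MonotonicSingleProbitModelbridge",
        "init_strat_cls": "SobolStrategy",
        "opt_strat_cls": "ModelWrapperStrategy",
        "model": "MonotonicRejectionGP",
    },
    "MonotonicMCLSE": {"target": 0.75, "beta": [1.96, 3.98]},  # 2 options
    "SobolStrategy": {"n_trials": [2, 4, 6]},  # 3 options -> 2 * 3 = 6 combinations
    "ModelWrapperStrategy": {"n_trials": 2},
}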
Example #2
    def test_bench_smoke(self):

        logger = BenchmarkLogger(log_every=2)
        problem = TestProblem(lb=[0], ub=[1])

        bench = Benchmark(
            problem=problem, logger=logger, configs=self.bench_config, n_reps=2
        )
        bench.run_benchmarks()

        out = bench.logger.pandas()

        # have as many final results as we expect
        self.assertTrue(len(out[out.final]) == bench.num_benchmarks)

        # have as many repetitions as we expect
        self.assertTrue(len(out.rep.unique()) == bench.n_reps)

        # reporting intervals are correct
        self.assertTrue((out[~out.final].trial_id % 2 == 0).all())

        # we don't run extra trials
        total_trials = out.SobolStrategy_n_trials.astype(
            int
        ) + out.ModelWrapperStrategy_n_trials.astype(int)
        self.assertTrue((out.trial_id <= total_trials).all())
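TestProblem is likewise a test fixture that these examples use but never define. A minimal sketch of what it might look like, following the Problem-subclass pattern of Examples #9 and #11; the base classes, constructor signature, and latent function below are assumptions, and the import of the benchmark base classes is omitted.

import time

import numpy as np

# Hypothetical sketch only; the real TestProblem lives in the test module.
class TestProblem(LSEProblem, Problem):
    def __init__(self, lb, ub, delay=False, **kwargs):
        super().__init__(lb, ub, **kwargs)
        self.delay = delay  # the pathos test uses delay=True to keep workers busy

    def f(self, x):
        if self.delay:
            time.sleep(0.1)  # simulate a slow objective for the async tests
        # arbitrary smooth latent function over the box [lb, ub]
        return -np.sum(np.square(np.atleast_2d(x)), axis=-1)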
Example #3
    def run_experiment(self, config_dict, seed, rep):
        # copy things that we mutate so parallel workers don't share state
        local_config = deepcopy(config_dict)
        local_logger = BenchmarkLogger(log_every=self.logger.log_every)
        try:
            _ = super().run_experiment(local_config, local_logger, seed, rep)
        except Exception as e:
            # return the exception instead of raising so one failed worker
            # doesn't abort the whole parallel map
            logger.error(f"Error on config {config_dict}: {e}!")
            return e
        return local_logger
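This override returns the worker-local logger on success and the exception object on failure, so a parallel map never raises and partial results survive individual crashes. How the work is actually submitted is not shown here; below is a rough sketch of a dispatch loop assuming an apply_async-style pool (self.pool and the seeding scheme are assumptions for illustration, not the library's implementation).

# Hypothetical dispatch loop; the real start_benchmarks() may differ.
def start_benchmarks(self):
    self.futures = []
    for rep in range(self.n_reps):
        for config in self.combinations:
            seed = self.global_seed + rep  # illustrative seeding only
            self.futures.append(
                self.pool.apply_async(self.run_experiment, (config, seed, rep))
            )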
Example #4
    def collate_benchmarks(self, wait=False):
        newfutures = []
        while self.futures:
            item = self.futures.pop()
            if wait or item.ready():
                result = item.get()
                if isinstance(result, BenchmarkLogger):
                    self.loggers.append(result)
            else:
                newfutures.append(item)

        self.futures = newfutures

        if len(self.loggers) > 0:
            out_logger = BenchmarkLogger()
            for logger in self.loggers:
                out_logger._log.extend(logger._log)
            self.logger = out_logger
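Note that results failing the isinstance check (the exceptions returned by run_experiment in Example #3) are silently dropped rather than merged. Callers typically poll this method until results arrive, as Example #6 does; a condensed usage sketch:

import time

# Usage sketch, mirroring the pattern in Example #6.
bench.start_benchmarks()
while len(bench.logger._log) == 0:  # poll until at least one result lands
    time.sleep(0.1)
    bench.collate_benchmarks(wait=False)
partial = bench.logger.pandas()  # whatever has finished so far
bench.collate_benchmarks(wait=True)  # block for the rest
final = bench.logger.pandas()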
Example #5
    def test_monotonic_single_lse_eval(self):
        config = {
            "common": {
                "lb": "[-1, -1]",
                "ub": "[1, 1]",
                "outcome_type": "single_probit",
            },
            "experiment": {
                "acqf": "MonotonicMCLSE",
                "modelbridge_cls": "MonotonicSingleProbitModelbridge",
                "init_strat_cls": "SobolStrategy",
                "opt_strat_cls": "ModelWrapperStrategy",
                "model": "MonotonicRejectionGP",
            },
            "MonotonicMCLSE": {
                "target": 0.75,
                "beta": 3.98,
            },
            "MonotonicRejectionGP": {
                "inducing_size": 10,
                "mean_covar_factory": "monotonic_mean_covar_factory",
            },
            "MonotonicSingleProbitModelbridge": {
                "restarts": 10,
                "samps": 1000,
            },
            "SobolStrategy": {
                "n_trials": 50,
            },
            "ModelWrapperStrategy": {
                "n_trials": 1,
            },
        }
        problem = TestProblem(lb=[-1, -1], ub=[1, 1])
        logger = BenchmarkLogger(log_every=100)
        bench = Benchmark(problem=problem, configs=config, logger=logger)
        strat = bench.run_experiment(bench.combinations[0], logger, 0, 0)
        e = problem.evaluate(strat)
        self.assertTrue(e["mean_square_err_p"] < 0.05)
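This test drives a single combination through run_experiment directly and then scores the returned strategy with problem.evaluate. The same config could instead be run end to end through the loop used in Example #2; a short sketch:

# Equivalent end-to-end run via the full benchmark loop (same API as Example #2).
bench = Benchmark(problem=problem, configs=config, logger=logger, n_reps=1)
bench.run_benchmarks()
results = bench.logger.pandas()
final_rows = results[results.final]  # one row per finished benchmark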
Example #6
    def test_bench_pathos_partial(self):
        """
        test that we can launch async and get partial results
        """
        logger = BenchmarkLogger(log_every=2)
        problem = TestProblem(lb=[0], ub=[1], delay=True)

        bench = PathosBenchmark(
            problem=problem, logger=logger, configs=self.bench_config, n_reps=1
        )
        bench.start_benchmarks()
        # wait for something to finish
        while len(bench.logger._log) == 0:
            time.sleep(0.1)
            bench.collate_benchmarks(wait=False)

        out = bench.logger.pandas()  # this should only be a partial result
        # have fewer than all the results
        self.assertTrue(len(out[out.final]) < bench.num_benchmarks)

        bench.collate_benchmarks(wait=True)  # wait for everything to finish
        out = bench.logger.pandas()  # complete results

        self.assertTrue(len(out[out.final]) == bench.num_benchmarks)
Example #7
def aggregate_bench_results(all_benchmarks):
    # merge the per-benchmark logs into a single logger and export one frame;
    # reuse the logging interval from the first benchmark's logger
    combo_logger = BenchmarkLogger(log_every=all_benchmarks[0].logger.log_every)
    for bench in all_benchmarks:
        combo_logger._log.extend(bench.logger._log)
    out_pd = combo_logger.pandas()
    return out_pd
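A hypothetical call site, assuming the benchmarks in the list have already been run so their loggers are populated:

# Usage sketch: merge several finished benchmarks into one results frame.
results_df = aggregate_bench_results([bench_1, bench_2])
final_rows = results_df[results_df.final]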
Example #8
novel_bounds = [{"lb": [-1, -1], "ub": [1, 1]}, {"lb": [-1, -1], "ub": [1, 1]}]
song_phenotypes = ["Metabolic", "Sensory", "Metabolic+Sensory", "Older-normal"]
song_betavals = [0.2, 0.5, 1, 2, 5, 10]
song_testfuns = [
    make_songetal_testfun(p, b)
    for p, b in product(song_phenotypes, song_betavals)
]
song_bounds = [{"lb": [-3, -20], "ub": [4, 120]}] * len(song_testfuns)
song_names = [
    f"song_p{p}_b{b}" for p, b in product(song_phenotypes, song_betavals)
]
all_testfuns = song_testfuns + novel_testfuns
all_bounds = song_bounds + novel_bounds
all_names = song_names + novel_names

combo_logger = BenchmarkLogger(log_every=log_every)

# benchmark configs, have to subdivide into 5
# configs Sobol, MCLSETS, and Song vs ours get set up all differently
# Song benches
bench_config_nonsobol_song = {
    "common": {
        "outcome_type": "single_probit",
        "target": 0.75
    },
    "experiment": {
        "acqf": [
            "MCLevelSetEstimation",
            "BernoulliMCMutualInformation",
            "MCPosteriorVariance",
        ],
Example #9
def plot_audiometric_lse_grids(sobol_trials,
                               opt_trials,
                               phenotype="Metabolic+Sensory",
                               beta=2):
    """
    Generates Fig. 8
    """

    logger = BenchmarkLogger(log_every=5)
    bench_rbf = {
        "common": {
            "pairwise": False,
            "target": 0.75
        },
        "experiment": {
            "acqf": "MonotonicMCLSE",
            "modelbridge_cls": "MonotonicSingleProbitModelbridge",
            "init_strat_cls": "SobolStrategy",
            "opt_strat_cls": "ModelWrapperStrategy",
            "model": "MonotonicRejectionGP",
            "parnames": "[context,intensity]",
        },
        "MonotonicMCLSE": {
            "target": 0.75,
            "beta": 3.98,
        },
        "MonotonicRejectionGP": {
            "inducing_size": 100,
            "mean_covar_factory": [
                "monotonic_mean_covar_factory",
            ],
            "monotonic_idxs": ["[1]", "[]"],
            "uniform_idxs": "[]",
        },
        "MonotonicSingleProbitModelbridge": {
            "restarts": 10,
            "samps": 1000
        },
        "SobolStrategy": {
            "n_trials": [sobol_trials],
        },
        "ModelWrapperStrategy": {
            "n_trials": [opt_trials],
            "refit_every": [refit_every],
        },
    }
    bench_song = {
        "common": {
            "pairwise": False,
            "target": 0.75
        },
        "experiment": {
            "acqf": "BernoulliMCMutualInformation",
            "modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
            "init_strat_cls": "SobolStrategy",
            "opt_strat_cls": "ModelWrapperStrategy",
            "model": "GPClassificationModel",
            "parnames": "[context,intensity]",
        },
        "GPClassificationModel": {
            "inducing_size": 100,
            "dim": 2,
            "mean_covar_factory": [
                "song_mean_covar_factory",
            ],
        },
        "SingleProbitModelbridgeWithSongHeuristic": {
            "restarts": 10,
            "samps": 1000
        },
        "SobolStrategy": {
            "n_trials": [sobol_trials],
        },
        "ModelWrapperStrategy": {
            "n_trials": [opt_trials],
            "refit_every": [refit_every],
        },
    }

    all_bench_configs = [bench_rbf, bench_song]

    testfun = make_songetal_testfun(phenotype=phenotype, beta=beta)

    class AudiometricProblem(LSEProblem, Problem):
        def f(self, x):
            return testfun(x)

    lb = [-3, -20]
    ub = [4, 120]
    benches = []
    problem = AudiometricProblem(lb, ub)
    for config in all_bench_configs:
        full_config = copy(config)
        full_config["common"]["lb"] = str(lb)
        full_config["common"]["ub"] = str(ub)
        benches.append(
            Benchmark(
                problem=problem,
                logger=logger,
                configs=full_config,
                global_seed=global_seed,
                n_reps=1,
            ))
    combo_bench = combine_benchmarks(*benches)
    strats = []

    for config in combo_bench.combinations:
        strat = combo_bench.run_experiment(config,
                                           logger,
                                           seed=global_seed,
                                           rep=0)
        strats.append(strat)

    titles = [
        "Monotonic RBF Model, LSE (ours)",
        "Nonmonotonic RBF Model, LSE (ours)",
        "Linear-Additive Model, BALD",
    ]
    fig, axes = plt.subplots(2, 2, figsize=(7.5, 6.5))
    plotting_axes = [axes[1, 0], axes[0, 1], axes[0, 0]]
    fig.delaxes(axes[1, 1])
    _ = [
        plot_strat(strat=strat_,
                   title=title_,
                   ax=ax_,
                   true_testfun=testfun,
                   xlabel="Frequency (kHz)",
                   ylabel="Intensity (dB HL)",
                   flipx=True,
                   logx=True,
                   show=False,
                   include_legend=False,
                   include_colorbar=False)
        for ax_, strat_, title_ in zip(plotting_axes, strats, titles)
    ]
    fig.tight_layout()
    handles, labels = axes[1, 0].get_legend_handles_labels()

    fig.legend(handles, labels, loc="lower right", bbox_to_anchor=(0.8, 0.2))
    cbr = fig.colorbar(axes[1, 0].images[0], ax=plotting_axes)
    cbr.set_label("Probability of Detection")

    return fig
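The function reads refit_every and global_seed as module-level globals (they are free variables in the snippet above). A hypothetical driver that supplies them and saves the figure; the values are illustrative, not those used for the published figure:

# Hypothetical driver; all values are illustrative.
refit_every = 5
global_seed = 1

fig = plot_audiometric_lse_grids(sobol_trials=5, opt_trials=45)
fig.savefig("audiometric_lse_grids.pdf", bbox_inches="tight")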
Example #10
def plot_acquisition_examples(sobol_trials, opt_trials, target_level=0.75):
    ### Same model, different acqf figure ####

    configs = {
        "common": {
            "pairwise": False,
            "target": target_level,
            "lb": "[-3]",
            "ub": "[3]",
        },
        "experiment": {
            "acqf": [
                "MonotonicMCPosteriorVariance",
                "MonotonicBernoulliMCMutualInformation",
                "MonotonicMCLSE",
            ],
            "modelbridge_cls":
            "MonotonicSingleProbitModelbridge",
            "init_strat_cls":
            "SobolStrategy",
            "opt_strat_cls":
            "ModelWrapperStrategy",
            "model":
            "MonotonicRejectionGP",
            "parnames":
            "[intensity]",
        },
        "MonotonicMCLSE": {
            "target": target_level,
            "beta": 3.98,
        },
        "MonotonicRejectionGP": {
            "inducing_size": 100,
            "mean_covar_factory": "monotonic_mean_covar_factory",
            "monotonic_idxs": "[0]",
            "uniform_idxs": "[]",
        },
        "MonotonicSingleProbitModelbridge": {
            "restarts": 10,
            "samps": 1000
        },
        "SobolStrategy": {
            "n_trials": sobol_trials
        },
        "ModelWrapperStrategy": {
            "n_trials": opt_trials,
            "refit_every": refit_every,
        },
    }

    def true_testfun(x):
        return norm.cdf(3 * x)

    class SimpleLinearProblem(Problem):
        def f(self, x):
            return norm.ppf(true_testfun(x))

    lb = [-3]
    ub = [3]

    logger = BenchmarkLogger()
    problem = SimpleLinearProblem(lb, ub)
    bench = Benchmark(
        problem=problem,
        logger=logger,
        configs=configs,
        global_seed=global_seed,
        n_reps=1,
    )

    # sobol_trials
    # now run each for just init trials, taking care to reseed each time
    strats = []
    for c in bench.combinations:
        np.random.seed(global_seed)
        torch.manual_seed(global_seed)
        s = SequentialStrategy.from_config(Config(config_dict=c))
        for _ in range(sobol_trials):
            next_x = s.gen()
            s.add_data(next_x, [problem.sample_y(next_x)])
        strats.append(s)

    # get first gen from all 3
    first_gens = [s.gen() for s in strats]

    fig, ax = plt.subplots(2, 2)
    plot_strat(
        strat=strats[0],
        title=f"First active trial\n (after {sobol_trials} Sobol trials)",
        ax=ax[0, 0],
        true_testfun=true_testfun,
        target_level=target_level,
        show=False,
        include_legend=False)
    samps = [
        norm.cdf(s.sample(torch.Tensor(g), num_samples=10000))
        for s, g in zip(strats, first_gens)
    ]
    predictions = [np.mean(s) for s in samps]
    names = ["First BALV sample", "First BALD sample", "First LSE sample"]
    markers = ["s", "*", "^"]
    for i in range(3):
        ax[0, 0].scatter(
            first_gens[i][0][0],
            predictions[i],
            label=names[i],
            marker=markers[i],
            color="black",
        )

    # now run them all for the full duration
    for s in strats:
        for _tr in range(opt_trials):
            next_x = s.gen()
            s.add_data(next_x, [problem.sample_y(next_x)])

    plotting_axes = [ax[0, 1], ax[1, 0], ax[1, 1]]

    titles = [
        f"Monotonic RBF Model,\n BALV, after {sobol_trials+opt_trials} total trials",
        f"Monotonic RBF Model,\n BALD, after {sobol_trials+opt_trials} total trials",
        f"Monotonic RBF Model,\n LSE (ours) after {sobol_trials+opt_trials} total trials",
    ]

    _ = [
        plot_strat(strat=s,
                   title=t,
                   ax=a,
                   true_testfun=true_testfun,
                   target_level=target_level,
                   show=False,
                   include_legend=False)
        for a, s, t in zip(plotting_axes, strats, titles)
    ]
    fig.tight_layout()
    handles, labels = ax[0, 0].get_legend_handles_labels()
    lgd = fig.legend(handles,
                     labels,
                     loc="lower right",
                     bbox_to_anchor=(1.5, 0.25))
    # return legend so savefig works correctly
    return fig, lgd
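Because the legend is placed outside the axes, the returned handle needs to be passed to savefig so it is not clipped; as in Example #9, refit_every and global_seed are assumed to be module-level globals. A usage sketch with illustrative values:

# Usage sketch: keep the out-of-axes legend when saving the figure.
refit_every = 5
global_seed = 1

fig, lgd = plot_acquisition_examples(sobol_trials=5, opt_trials=15)
fig.savefig("acquisition_examples.pdf", bbox_extra_artists=(lgd,), bbox_inches="tight")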
Example #11
def plot_novel_lse_grids(sobol_trials, opt_trials, funtype="detection"):
    """
    Generates Fig. TBA
    """

    logger = BenchmarkLogger(
        log_every=opt_trials)  # we only care about final perf
    bench_rbf = {
        "common": {
            "pairwise": False,
            "target": 0.75
        },
        "experiment": {
            "acqf": "MonotonicMCLSE",
            "modelbridge_cls": "MonotonicSingleProbitModelbridge",
            "init_strat_cls": "SobolStrategy",
            "opt_strat_cls": "ModelWrapperStrategy",
            "model": "MonotonicRejectionGP",
            "parnames": "[context,intensity]",
        },
        "MonotonicMCLSE": {
            "target": 0.75,
            "beta": 3.98,
        },
        "MonotonicRejectionGP": {
            "inducing_size": 100,
            "mean_covar_factory": [
                "monotonic_mean_covar_factory",
            ],
            "monotonic_idxs": ["[1]", "[]"],
            "uniform_idxs": "[]",
        },
        "MonotonicSingleProbitModelbridge": {
            "restarts": 10,
            "samps": 1000
        },
        "SobolStrategy": {
            "n_trials": [sobol_trials],
        },
        "ModelWrapperStrategy": {
            "n_trials": [opt_trials],
            "refit_every": [refit_every],
        },
    }
    bench_song = {
        "common": {
            "pairwise": False,
            "target": 0.75
        },
        "experiment": {
            "acqf": "BernoulliMCMutualInformation",
            "modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
            "init_strat_cls": "SobolStrategy",
            "opt_strat_cls": "ModelWrapperStrategy",
            "model": "GPClassificationModel",
            "parnames": "[context,intensity]",
        },
        "GPClassificationModel": {
            "inducing_size": 100,
            "dim": 2,
            "mean_covar_factory": [
                "song_mean_covar_factory",
            ],
        },
        "SingleProbitModelbridgeWithSongHeuristic": {
            "restarts": 10,
            "samps": 1000
        },
        "SobolStrategy": {
            "n_trials": [sobol_trials],
        },
        "ModelWrapperStrategy": {
            "n_trials": [opt_trials],
            "refit_every": [refit_every],
        },
    }
    all_bench_configs = [bench_rbf, bench_song]

    if funtype == "detection":
        testfun = novel_detection_testfun
        yes_label = "Detected trial"
        no_label = "Nondetected trial"
    elif funtype == "discrimination":
        testfun = novel_discrimination_testfun
        yes_label = "Correct trial"
        no_label = "Incorrect trial"
    else:
        raise RuntimeError("unknown testfun")

    class NovelProblem(LSEProblem, Problem):
        def f(self, x):
            return testfun(x)

    lb = [-1, -1]
    ub = [1, 1]
    benches = []
    problem = NovelProblem(lb, ub, gridsize=50)
    for config in all_bench_configs:
        full_config = copy(config)
        full_config["common"]["lb"] = str(lb)
        full_config["common"]["ub"] = str(ub)
        benches.append(
            Benchmark(
                problem=problem,
                logger=logger,
                configs=full_config,
                global_seed=global_seed,
                n_reps=1,
            ))
    combo_bench = combine_benchmarks(*benches)
    strats = []

    for config in combo_bench.combinations:
        strat = combo_bench.run_experiment(config,
                                           logger,
                                           seed=global_seed,
                                           rep=0)
        strats.append(strat)

    titles = [
        "Monotonic RBF Model, LSE (ours)",
        "Nonmonotonic RBF Model, LSE (ours)",
        "Linear-Additive Model, BALD",
    ]
    fig, axes = plt.subplots(2, 2, figsize=(7.5, 6.5))
    plotting_axes = [axes[1, 0], axes[0, 1], axes[0, 0]]
    fig.delaxes(axes[1, 1])
    _ = [
        plot_strat(strat=strat_,
                   title=title_,
                   ax=ax_,
                   true_testfun=testfun,
                   yes_label=yes_label,
                   no_label=no_label,
                   show=False,
                   include_legend=False,
                   include_colorbar=False)
        for ax_, strat_, title_ in zip(plotting_axes, strats, titles)
    ]
    fig.tight_layout()
    handles, labels = axes[1, 0].get_legend_handles_labels()

    fig.legend(handles, labels, loc="lower right", bbox_to_anchor=(0.8, 0.2))
    cbr = fig.colorbar(axes[1, 0].images[0], ax=plotting_axes)
    cbr.set_label("Probability of Detection")

    return fig
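As with Example #9, refit_every and global_seed are expected at module level, and funtype selects between the detection and discrimination test functions (any other value raises RuntimeError). A hypothetical driver with illustrative values:

# Hypothetical driver; settings and trial counts are illustrative.
refit_every = 5
global_seed = 1

fig_detection = plot_novel_lse_grids(sobol_trials=5, opt_trials=45, funtype="detection")
fig_detection.savefig("novel_detection_lse_grids.pdf", bbox_inches="tight")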