Example #1
def local(  # noqa: C901
    tuning_config,
    acq_function="mes",
    acq_function_samples=1,
    confidence=0.9,
    data_path=None,
    gp_burnin=5,
    gp_samples=300,
    gp_initial_burnin=100,
    gp_initial_samples=300,
    logfile="log.txt",
    n_initial_points=30,
    n_points=500,
    plot_every=5,
    plot_path="plots",
    random_seed=0,
    result_every=5,
    resume=True,
    verbose=False,
):
    """Run a local tune.

    Parameters defined in the `tuning_config` file always take precedence.
    """
    json_dict = json.load(tuning_config)
    settings, commands, fixed_params, param_ranges = load_tuning_config(json_dict)
    log_level = logging.DEBUG if verbose else logging.INFO
    log_format = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
    root_logger = logging.getLogger()
    root_logger.setLevel(log_level)
    file_logger = logging.FileHandler(settings.get("logfile", logfile))
    file_logger.setFormatter(log_format)
    root_logger.addHandler(file_logger)
    console_logger = logging.StreamHandler(sys.stdout)
    console_logger.setFormatter(log_format)
    root_logger.addHandler(console_logger)
    logging.debug(f"Got the following tuning settings:\n{json_dict}")

    # 1. Create the seed sequence and RNG
    ss = np.random.SeedSequence(settings.get("random_seed", random_seed))
    random_state = np.random.RandomState(np.random.MT19937(ss.spawn(1)[0]))
    # 2. Create the kernel (currently fixed, see the TODOs below)
    # 3. Create the optimizer
    opt = Optimizer(
        dimensions=list(param_ranges.values()),
        n_points=settings.get("n_points", n_points),
        n_initial_points=settings.get("n_initial_points", n_initial_points),
        # gp_kernel=kernel,  # TODO: Let user pass in different kernels
        gp_kwargs=dict(normalize_y=True),
        # gp_priors=priors,  # TODO: Let user pass in priors
        acq_func=settings.get("acq_function", acq_function),
        acq_func_kwargs=dict(alpha="inf", n_thompson=20),
        random_state=random_state,
    )
    X = []
    y = []
    noise = []
    iteration = 0

    # 3.1 Resume from existing data:
    if data_path is None:
        data_path = "data.npz"
    if resume:
        path = pathlib.Path(data_path)
        if path.exists():
            with np.load(path) as importa:
                X = importa["arr_0"].tolist()
                y = importa["arr_1"].tolist()
                noise = importa["arr_2"].tolist()
            if len(X[0]) != opt.space.n_dims:
                logging.error(
                    "The number of parameters are not matching the number of "
                    "dimensions. Rename the existing data file or ensure that the "
                    "parameter ranges are correct."
                )
                sys.exit(1)
            reduction_needed, X_reduced, y_reduced, noise_reduced = reduce_ranges(
                X, y, noise, opt.space
            )
            if reduction_needed:
                backup_path = path.parent / (
                    path.stem + f"_backup_{int(time.time())}" + path.suffix
                )
                logging.warning(
                    f"The parameter ranges are smaller than the existing data. "
                    f"Some points will have to be discarded. "
                    f"The original {len(X)} data points will be saved to "
                    f"{backup_path}"
                )
                np.savez_compressed(
                    backup_path, np.array(X), np.array(y), np.array(noise)
                )
                X = X_reduced
                y = y_reduced
                noise = noise_reduced

            iteration = len(X)
            logging.info(
                f"Importing {iteration} existing datapoints. This could take a while..."
            )
            opt.tell(
                X,
                y,
                noise_vector=noise,
                gp_burnin=settings.get("gp_initial_burnin", gp_initial_burnin),
                gp_samples=settings.get("gp_initial_samples", gp_initial_samples),
                n_samples=settings.get("n_samples", 1),
                progress=True,
            )
            logging.info("Importing finished.")

    # 4. Main optimization loop:
    while True:
        logging.info("Starting iteration {}".format(iteration))
        result_every_n = settings.get("result_every", result_every)
        if (
            result_every_n > 0
            and iteration % result_every_n == 0
            and opt.gp.chain_ is not None
        ):
            result_object = create_result(Xi=X, yi=y, space=opt.space, models=[opt.gp])
            try:
                best_point, best_value = expected_ucb(result_object, alpha=0.0)
                best_point_dict = dict(zip(param_ranges.keys(), best_point))
                logging.info(f"Current optimum:\n{best_point_dict}")
                logging.info(f"Estimated value: {best_value}")
                confidence_val = settings.get("confidence", confidence)
                confidence_out = confidence_intervals(
                    optimizer=opt,
                    param_names=list(param_ranges.keys()),
                    hdi_prob=confidence_val,
                    opt_samples=1000,
                    multimodal=False,
                )
                logging.info(
                    f"{confidence_val*100}% confidence intervals:\n{confidence_out}"
                )
            except ValueError:
                logging.info(
                    "Computing current optimum was not successful. "
                    "This can happen in rare cases and running the "
                    "tuner again usually works."
                )
        plot_every_n = settings.get("plot_every", plot_every)
        if (
            plot_every_n > 0
            and iteration % plot_every_n == 0
            and opt.gp.chain_ is not None
        ):
            logging.getLogger("matplotlib.font_manager").disabled = True
            if opt.space.n_dims == 1:
                logging.warning(
                    "Plotting for only 1 parameter is not supported yet."
                )
            else:
                logging.debug("Starting to compute the next plot.")
                result_object = create_result(
                    Xi=X, yi=y, space=opt.space, models=[opt.gp]
                )
                plt.style.use("dark_background")
                fig, ax = plt.subplots(
                    nrows=opt.space.n_dims,
                    ncols=opt.space.n_dims,
                    figsize=(3 * opt.space.n_dims, 3 * opt.space.n_dims),
                )
                fig.patch.set_facecolor("#36393f")
                for i in range(opt.space.n_dims):
                    for j in range(opt.space.n_dims):
                        ax[i, j].set_facecolor("#36393f")
                timestr = time.strftime("%Y%m%d-%H%M%S")
                plot_objective(
                    result_object, dimensions=list(param_ranges.keys()), fig=fig, ax=ax
                )
                plotpath = pathlib.Path(settings.get("plot_path", plot_path))
                plotpath.mkdir(parents=True, exist_ok=True)
                full_plotpath = plotpath / f"{timestr}-{iteration}.png"
                plt.savefig(
                    full_plotpath,
                    pad_inches=0.1,
                    dpi=300,
                    bbox_inches="tight",
                    facecolor="#36393f",
                )
                logging.info(f"Saving a plot to {full_plotpath}.")
                plt.close(fig)
        point = opt.ask()
        point_dict = dict(zip(param_ranges.keys(), point))
        logging.info("Testing {}".format(point_dict))

        engine_json = prepare_engines_json(commands=commands, fixed_params=fixed_params)
        logging.debug(f"engines.json is prepared:\n{engine_json}")
        write_engines_json(engine_json, point_dict)
        logging.info("Start experiment")
        now = datetime.now()
        out_exp, out_exp_err = run_match(**settings)
        later = datetime.now()
        difference = (later - now).total_seconds()
        logging.info(f"Experiment finished ({difference}s elapsed).")
        logging.debug(f"Raw result:\n{out_exp}\n{out_exp_err}")

        score, error = parse_experiment_result(out_exp, **settings)
        logging.info("Got score: {} +- {}".format(score, error))
        logging.info("Updating model")
        while True:
            try:
                now = datetime.now()
                # We fetch kwargs manually here to avoid collisions:
                n_samples = settings.get("acq_function_samples", acq_function_samples)
                if opt.gp.chain_ is None:
                    # The very first model fit uses the longer initial burn-in:
                    current_burnin = settings.get("gp_initial_burnin", gp_initial_burnin)
                    current_samples = settings.get(
                        "gp_initial_samples", gp_initial_samples
                    )
                else:
                    current_burnin = settings.get("gp_burnin", gp_burnin)
                    current_samples = settings.get("gp_samples", gp_samples)
                opt.tell(
                    point,
                    score,
                    n_samples=n_samples,
                    gp_samples=current_samples,
                    gp_burnin=current_burnin,
                )
                later = datetime.now()
                difference = (later - now).total_seconds()
                logging.info(f"GP sampling finished ({difference}s)")
                logging.debug(f"GP kernel: {opt.gp.kernel_}")
            except ValueError:
                logging.warning(
                    "Error encountered during fitting. Trying to sample chain a bit. "
                    "If this problem persists, restart the tuner to reinitialize."
                )
                opt.gp.sample(n_burnin=5, priors=opt.gp_priors)
            else:
                break
        X.append(point)
        y.append(score)
        noise.append(error)
        iteration = len(X)

        with AtomicWriter(data_path, mode="wb", overwrite=True).open() as f:
            np.savez_compressed(f, np.array(X), np.array(y), np.array(noise))
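
A minimal invocation sketch (not part of the original source): `local` expects an already-open file handle for `tuning_config`, since it passes it straight to `json.load`. The file name and keyword values below are purely illustrative.

# Hypothetical usage: open the JSON tuning configuration and start the tune.
with open("config.json") as tuning_config:  # illustrative file name
    local(tuning_config, verbose=True, random_seed=42)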
Example #2
def print_results(
    optimizer: Optimizer,
    result_object: OptimizeResult,
    parameter_names: Sequence[str],
    confidence: float = 0.9,
) -> None:
    """Log the current results of the optimizer.

    Parameters
    ----------
    optimizer : bask.Optimizer
        Fitted Optimizer object.
    result_object : scipy.optimize.OptimizeResult
        Result object containing the data and the last fitted model.
    parameter_names : Sequence of str
        Names of the parameters to use for printing.
    confidence : float, default=0.9
        Confidence used for the confidence intervals.
    """
    logger = logging.getLogger(LOGGER)
    try:
        best_point, best_value = expected_ucb(result_object, alpha=0.0)
        best_point_dict = dict(zip(parameter_names, best_point))
        with optimizer.gp.noise_set_to_zero():
            _, best_std = optimizer.gp.predict(
                optimizer.space.transform([best_point]), return_std=True
            )
        logger.info(f"Current optimum:\n{best_point_dict}")
        logger.info(
            f"Estimated Elo: {np.around(-best_value * 100, 4)} +- "
            f"{np.around(best_std * 100, 4).item()}"
        )
        confidence_mult = erfinv(confidence) * np.sqrt(2)
        lower_bound = np.around(
            -best_value * 100 - confidence_mult * best_std * 100, 4
        ).item()
        upper_bound = np.around(
            -best_value * 100 + confidence_mult * best_std * 100, 4
        ).item()
        logger.info(
            f"{confidence * 100}% confidence interval of the Elo value: "
            f"({lower_bound}, "
            f"{upper_bound})"
        )
        confidence_out = confidence_intervals(
            optimizer=optimizer,
            param_names=parameter_names,
            hdi_prob=confidence,
            opt_samples=1000,
            space_samples=5000,
            multimodal=True,
            only_mean=True,
        )
        logger.info(
            f"{confidence * 100}% confidence intervals of the parameters:"
            f"\n{confidence_out}"
        )
    except ValueError:
        logger.info(
            "Computing current optimum was not successful. "
            "This can happen in rare cases and running the "
            "tuner again usually works."
        )
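
The interval above relies on the identity that, for a normal distribution, the central interval of probability `confidence` extends `sqrt(2) * erfinv(confidence)` standard deviations from the mean. A quick sketch verifying this (assuming `erfinv` is imported from `scipy.special`):

import numpy as np
from scipy.special import erfinv
from scipy.stats import norm

confidence = 0.9
print(erfinv(confidence) * np.sqrt(2))  # ~1.6449
print(norm.ppf(0.5 + confidence / 2))   # the same z-value via the inverse CDF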
Example #3
def plot_objective(
    result,
    levels=20,
    n_points=200,
    n_samples=30,
    size=3,
    zscale="linear",
    dimensions=None,
    n_random_restarts=100,
    alpha=0.25,
    margin=0.65,
    colors=None,
    fig=None,
    ax=None,
):
    """Pairwise partial dependence plot of the objective function.
    The diagonal shows the partial dependence for dimension `i` with
    respect to the objective function. The off-diagonal shows the
    partial dependence for dimensions `i` and `j` with
    respect to the objective function. The objective function is
    approximated by `result.model.`
    Pairwise scatter plots of the points at which the objective
    function was directly evaluated are shown on the off-diagonal.
    A red point indicates the found minimum.
    Note: search spaces that contain `Categorical` dimensions are
          currently not supported by this function.
    Parameters
    ----------
    * `result` [`OptimizeResult`]
        The result for which to create the scatter plot matrix.
    * `levels` [int, default=20]
        Number of levels to draw on the contour plot, passed directly
        to `plt.contourf()`.
    * `n_points` [int, default=200]
        Number of points at which to evaluate the partial dependence
        along each dimension.
    * `n_samples` [int, default=30]
        Number of random samples to use for averaging the model function
        at each of the `n_points`.
    * `size` [float, default=3]
        Height (in inches) of each facet.
    * `zscale` [str, default='linear']
        Scale to use for the z axis of the contour plots. Either 'linear'
        or 'log'.
    * `dimensions` [list of str, default=None]
        Labels of the dimension variables. `None` defaults to
        `space.dimensions[i].name`, or if also `None` to `['X_0', 'X_1', ..]`.
    * `n_random_restarts` [int, default=100]
        Number of restarts to try to find the global optimum.
    * `alpha` [float, default=0.25]
        Transparency of the sampled points.
    * `margin` [float, default=0.65]
        Margin in inches around the plot.
    * `colors` [list of tuples, default=None]
        Colors to use for the optima.
    * `fig` [Matplotlib figure, default=None]
        Figure to use for plotting. If None, it will create one.
    * `ax` [k x k axes, default=None]
        Axes on which to plot the marginals. If None, it will create appropriate
        axes.
    Returns
    -------
    * `ax` [`Axes`]
        The matplotlib axes.
    """
    if colors is None:
        colors = plt.cm.get_cmap("Set3").colors
    space = result.space
    samples = np.asarray(result.x_iters)
    rvs_transformed = space.transform(space.rvs(n_samples=n_samples))

    if zscale == "log":
        locator = LogLocator()
    elif zscale == "linear":
        locator = None
    else:
        raise ValueError("Valid values for zscale are 'linear' and 'log',"
                         " not '%s'." % zscale)
    if fig is None:
        fig, ax = plt.subplots(
            space.n_dims,
            space.n_dims,
            figsize=(size * space.n_dims, size * space.n_dims),
        )
    width, height = fig.get_size_inches()

    fig.subplots_adjust(
        left=margin / width,
        right=1 - margin / width,
        bottom=margin / height,
        top=1 - margin / height,
        hspace=0.1,
        wspace=0.1,
    )
    # Locate the optimum and the expected-UCB point. expected_ucb can
    # occasionally fail, so retry and give up after 10 attempts (the plots
    # below then simply omit the optima markers).
    failures = 0
    while True:
        try:
            with result.models[-1].noise_set_to_zero():
                min_x = expected_ucb(result,
                                     alpha=0.0,
                                     n_random_starts=n_random_restarts)[0]
                min_ucb = expected_ucb(result,
                                       n_random_starts=n_random_restarts)[0]
        except ValueError:
            failures += 1
            if failures == 10:
                break
            continue
        else:
            break

    for i in range(space.n_dims):
        for j in range(space.n_dims):
            if i == j:
                xi, yi = partial_dependence(
                    space,
                    result.models[-1],
                    i,
                    j=None,
                    sample_points=rvs_transformed,
                    n_points=n_points,
                )
                yi_min, yi_max = np.min(yi), np.max(yi)
                ax[i, i].plot(xi, yi, color=colors[1])
                if failures != 10:
                    ax[i, i].axvline(min_x[i],
                                     linestyle="--",
                                     color=colors[3],
                                     lw=1)
                    ax[i, i].axvline(min_ucb[i],
                                     linestyle="--",
                                     color=colors[5],
                                     lw=1)
                    ax[i, i].text(
                        min_x[i],
                        yi_min + 0.9 * (yi_max - yi_min),
                        f"{np.around(min_x[i], 4)}",
                        color=colors[3],
                    )
                    ax[i, i].text(
                        min_ucb[i],
                        yi_min + 0.7 * (yi_max - yi_min),
                        f"{np.around(min_ucb[i], 4)}",
                        color=colors[5],
                    )

            # lower triangle
            elif i > j:
                xi, yi, zi = partial_dependence(space, result.models[-1], i, j,
                                                rvs_transformed, n_points)
                ax[i, j].contourf(xi,
                                  yi,
                                  zi,
                                  levels,
                                  locator=locator,
                                  cmap="viridis_r")
                ax[i, j].scatter(samples[:, j],
                                 samples[:, i],
                                 c="k",
                                 s=10,
                                 lw=0.0,
                                 alpha=alpha)
                if failures != 10:
                    ax[i, j].scatter(min_x[j], min_x[i], c=["r"], s=20, lw=0.0)
                    ax[i, j].scatter(min_ucb[j],
                                     min_ucb[i],
                                     c=["xkcd:orange"],
                                     s=20,
                                     lw=0.0)
    # Collect all non-constant dimensions for axis formatting.
    plot_dims = []
    for row in range(space.n_dims):
        if space.dimensions[row].is_constant:
            continue
        plot_dims.append((row, space.dimensions[row]))
    return _format_scatter_plot_axes(
        ax,
        space,
        ylabel="Partial dependence",
        plot_dims=plot_dims,
        dim_labels=dimensions,
    )
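
A usage sketch tying this back to Example #1 (it assumes the fitted optimizer `opt`, the collected `X` and `y`, and `param_ranges` from there, along with `create_result` and matplotlib's `plt`):

result_object = create_result(Xi=X, yi=y, space=opt.space, models=[opt.gp])
fig, ax = plt.subplots(
    nrows=opt.space.n_dims,
    ncols=opt.space.n_dims,
    figsize=(3 * opt.space.n_dims, 3 * opt.space.n_dims),
)
plot_objective(result_object, dimensions=list(param_ranges.keys()), fig=fig, ax=ax)
fig.savefig("objective.png", dpi=300, bbox_inches="tight")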
Example #4
    def run(self):
        # 0. Before we run the main loop, do we need to initialize or resume?
        #    * Resume from files (in experiment folder)
        #    * Create tune entry in db if it does not exist yet

        if "tune_id" not in self.experiment:
            with self.sessionmaker() as session:
                tune = SqlTune(
                    weight=self.experiment.get("weight", 1.0),
                    description=self.experiment.get("description", None),
                )
                session.add(tune)
                session.flush()
                self.experiment["tune_id"] = tune.id
                self.write_experiment_file()
                new_x = self.opt.ask()
                # Alter engine json using Initstrings
                params = dict(zip(self.parameters, new_x))
                self.change_engine_config(self.experiment["engine"], params)
                self.insert_jobs(session, new_x)
                self.logger.info("New jobs committed to database.")
        while True:
            self.logger.debug("Begin querying for new data...")
            # Check if minimum sample size and minimum wait time are reached, then query
            # data and update model:
            with self.sessionmaker() as session:
                X, y, variances, samplesize_reached = self.query_data(
                    session, include_active=True
                )
                self.logger.debug(
                    f"Queried the database for data and got (last 5):\n"
                    f"{X[-5:]}\n{y[-5:]}"
                )
                if len(X) == 0:
                    self.logger.info("There are no datapoints yet, start first job")
                    new_x = self.opt.ask()
                    # Alter engine json using Initstrings
                    params = dict(zip(self.parameters, new_x))
                    self.change_engine_config(self.experiment["engine"], params)
                    self.insert_jobs(session, new_x)
                    self.logger.info("New jobs committed to database.")
                    samplesize_reached = False

            if not samplesize_reached:
                sleep_seconds = self.experiment.get("sleep_time", 60)
                self.logger.debug(
                    f"Required sample size not yet reached. Sleeping {sleep_seconds}"
                    f"seconds."
                )
                sleep(sleep_seconds)
                continue

            # Tell optimizer about the new results:
            now = datetime.now()
            self.opt.tell(
                X.tolist(),
                y.tolist(),
                noise_vector=variances.tolist(),
                fit=True,
                replace=True,
                n_samples=self.tunecfg["n_samples"],
                gp_samples=self.tunecfg["gp_samples"],
                gp_burnin=self.tunecfg["gp_burnin"],
                progress=False,
            )
            later = datetime.now()
            difference = (later - now).total_seconds()
            self.logger.info(
                f"Calculating GP posterior and acquisition function finished in "
                f"{difference}s"
            )
            self.logger.info(f"Current GP kernel:\n{self.opt.gp.kernel_}")
            if self.opt.gp.chain_ is not None:
                self.logger.debug("Saving position and chain")
                self.save_state()

            # Ask optimizer for new configuration and insert jobs:
            new_x = self.opt.ask()
            # Alter engine json using Initstrings
            params = dict(zip(self.parameters, new_x))
            self.change_engine_config(self.experiment["engine"], params)
            with self.sessionmaker() as session:
                self.insert_jobs(session, new_x)
            self.logger.info("New jobs committed to database.")
            sleep(self.experiment.get("sleep_time", 60))

            if self.opt.gp.chain_ is not None:
                result_object = create_result(
                    Xi=X.tolist(),
                    yi=y.tolist(),
                    space=self.opt.space,
                    models=[self.opt.gp],
                )
                try:
                    opt_x, opt_y = expected_ucb(result_object)
                    self.logger.info(
                        f"Current optimum: "
                        f"{dict(zip(self.parameters, np.around(opt_x,4)))}"
                    )
                except ValueError:
                    self.logger.info(
                        "Current optimum: None (optimizer errored out :( )"
                    )
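
Stripped of the database plumbing, the loop above is the classic ask/tell cycle. A minimal sketch (`evaluate` is a hypothetical stand-in for running matches and extracting a score with its variance; `opt` is a bask.Optimizer as elsewhere on this page):

X, y, variances = [], [], []
for _ in range(10):  # hypothetical iteration budget
    new_x = opt.ask()  # propose a new configuration
    score, variance = evaluate(new_x)  # hypothetical evaluation
    X.append(new_x)
    y.append(score)
    variances.append(variance)
    # Mirror the call above: refit on the full data with noise estimates.
    opt.tell(X, y, noise_vector=variances, fit=True, replace=True)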
Example #5
def plot_activesubspace_sufficient_summary(
        active_subspaces_object,
        inputs,
        outputs,
        result_object,
        active_subspace_figure=None,
        active_subspace_sufficient_summary_axes=None,
        filename=None,
        figsize=(10, 8),
        title="",
):
    """
    Plot the sufficient summary.

    :param active_subspaces_object: fitted active subspaces object whose
        `transform` method projects points onto the active subspace.
    :param numpy.ndarray inputs: array n_samples-by-n_params containing the
        points in the full input space.
    :param numpy.ndarray outputs: array n_samples-by-1 containing the
        corresponding function evaluations.
    :param result_object: optimization result containing the data points and
        the fitted model, used to mark the current optimum.
    :param active_subspace_figure: matplotlib figure to attach the colorbar to.
    :param active_subspace_sufficient_summary_axes: matplotlib axes to plot on.
    :param str filename: if specified, the figure is saved at `filename`.
    :param tuple(int,int) figsize: tuple in inches defining the figure
        size. Defaults to (10, 8). Currently unused when a figure is passed in.
    :param str title: title of the plot.
    :raises: ValueError, TypeError

    .. warning:: `active_subspaces_object.fit` has to be called in advance.

        Plot only available for partitions up to dimension 2.
    """
    if active_subspaces_object.evects is None:
        raise TypeError(
            "The eigenvectors have not been computed. "
            "You have to call the fit method first."
        )

    if title and active_subspace_figure is not None:
        active_subspace_figure.suptitle(title)

    best_point, best_value = expected_ucb(result_object, alpha=0.0)
    tuner_sample_points = np.asarray(result_object.x_iters)
    best_point_normalized_zero_to_one = result_object.space.transform(
        [best_point])
    tuner_sample_points_normalized_zero_to_one = result_object.space.transform(
        tuner_sample_points)
    # Map the unit-cube points to [-1, 1], the input domain the active
    # subspaces machinery expects.
    best_point_normalized_minus_one_to_one = best_point_normalized_zero_to_one * 2 - 1
    tuner_sample_points_normalized_minus_one_to_one = (
        tuner_sample_points_normalized_zero_to_one * 2 - 1
    )

    if active_subspaces_object.dim == 1:
        active_subspace_sufficient_summary_axes.scatter(
            active_subspaces_object.transform(inputs)[0],
            outputs,
            c="blue",
            s=40,
            alpha=0.9,
            edgecolors="k",
        )
        active_subspace_sufficient_summary_axes.set_xlabel(
            "Active variable " + r"$W_1^T \mathbf{\mu}$", fontsize=18)
        active_subspace_sufficient_summary_axes.set_ylabel(
            r"$f \, (\mathbf{\mu})$", fontsize=18)
    elif active_subspaces_object.dim == 2:
        active_subspace_x = active_subspaces_object.transform(inputs)[0]
        active_subspace_best_point = active_subspaces_object.transform(
            best_point_normalized_minus_one_to_one)[0]
        active_subspace_tuner_sample_points = active_subspaces_object.transform(
            tuner_sample_points_normalized_minus_one_to_one)[0]
        contour_plot = active_subspace_sufficient_summary_axes.tricontourf(
            active_subspace_x[:, 0],
            active_subspace_x[:, 1],
            outputs.reshape(-1),
            levels=20,
            alpha=0.9,
            cmap="viridis_r",
            edgecolors="k",
            vmin=np.min(outputs),
            vmax=np.max(outputs),
        )
        active_subspace_sufficient_summary_axes.scatter(
            active_subspace_tuner_sample_points[:, 0],
            active_subspace_tuner_sample_points[:, 1],
            c="k",
            s=20,
            lw=0.0,
            alpha=0.25,
        )
        active_subspace_sufficient_summary_axes.scatter(
            active_subspace_best_point[0, 0],
            active_subspace_best_point[0, 1],
            c=["r"],
            s=20,
            lw=0.0,
        )
        active_subspace_sufficient_summary_axes.set_xlabel(
            "First active variable", fontsize=18)
        active_subspace_sufficient_summary_axes.set_ylabel(
            "Second active variable", fontsize=18)
        ymin = 1.1 * np.amin([
            np.amin(active_subspace_x[:, 0]),
            np.amin(active_subspace_x[:, 1])
        ])
        ymax = 1.1 * np.amax([
            np.amax(active_subspace_x[:, 0]),
            np.amax(active_subspace_x[:, 1])
        ])
        active_subspace_sufficient_summary_axes.axis("equal")
        active_subspace_sufficient_summary_axes.axis([ymin, ymax, ymin, ymax])

        active_subspace_figure.colorbar(
            contour_plot, ax=active_subspace_sufficient_summary_axes)
    else:
        raise ValueError(
            "Sufficient summary plots cannot be made in more than 2 dimensions."
        )

    active_subspace_sufficient_summary_axes.grid(linestyle="dotted")

    # Save the figure if a filename was given, as documented above.
    if filename is not None and active_subspace_figure is not None:
        active_subspace_figure.savefig(filename)
    return active_subspace_sufficient_summary_axes
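
The `* 2 - 1` rescaling used twice above is the affine map x -> 2x - 1, which takes the unit cube [0, 1]^d (the normalized space produced by `result_object.space.transform`) onto [-1, 1]^d, the domain the active subspace projection is defined on. A one-line check:

import numpy as np

# The endpoints and midpoint of [0, 1] map to -1, 0 and 1 respectively.
print(np.array([[0.0, 0.5, 1.0]]) * 2 - 1)  # [[-1.  0.  1.]]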