Example #1
def test_gen_commands(args, run_uuid):
    args, opt_file_lookup = args

    assume(
        all(
            launcher._is_arg_safe(ss) for ss in args.values()
            if isinstance(ss, str)))

    uniqify = [
        CmdArgs.optimizer, CmdArgs.data, CmdArgs.classifier, CmdArgs.metric
    ]
    for uu in uniqify:
        assume(all(launcher._is_arg_safe(ss) for ss in args[uu]))
        args[uu] = list(set(args[uu]))

    m_set = set(args[CmdArgs.metric])
    m_lookup = {
        problem_type: sorted(m_set.intersection(mm))
        for problem_type, mm in data.METRICS_LOOKUP.items()
    }
    ok = all(
        len(m_lookup[data.get_problem_type(dd)]) > 0
        for dd in args[CmdArgs.data])
    assume(ok)

    G = launcher.gen_commands(args, opt_file_lookup, run_uuid)
    L = list(G)
    assert L is not None
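
These property-based tests lean on Hypothesis' `assume` to discard generated inputs that violate a precondition instead of failing on them. A minimal, self-contained sketch of that pattern, with simplified stand-in strategies (the real tests build `args`, `opt_file_lookup`, and `run_uuid` from the project's own custom strategies, and the metric names below are only placeholders):

from hypothesis import assume, given, strategies as st

@given(st.uuids(), st.lists(st.sampled_from(["acc", "nll", "mae", "mse"]), min_size=1))
def test_metric_list_dedup(run_uuid, metrics):
    # assume() tells Hypothesis to throw away this example rather than report a failure
    assume(all(" " not in mm for mm in metrics))
    metrics = list(set(metrics))  # same uniqify step as in test_gen_commands above
    assert len(metrics) >= 1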
Example #2
def test_run_sklearn_study(api_config, model_name, dataset, scorer, n_calls,
                           n_suggestions, seed):
    prob_type = data.get_problem_type(dataset)
    assume(scorer in data.METRICS_LOOKUP[prob_type])

    random = np.random.RandomState(seed)
    exp.run_sklearn_study(RandomOptimizer, {"random": random}, model_name,
                          dataset, scorer, n_calls, n_suggestions)
Example #3
def test_run_study(api_config, model_name, dataset, scorer, n_calls,
                   n_suggestions, seed):
    api_config, _, _, _ = api_config

    prob_type = data.get_problem_type(dataset)
    assume(scorer in data.METRICS_LOOKUP[prob_type])

    function_instance = SklearnModel(model_name, dataset, scorer)
    optimizer = RandomOptimizer(api_config, random=np.random.RandomState(seed))
    optimizer.get_version()
    exp.run_study(optimizer, function_instance, n_calls, n_suggestions)
Example #4
def test_run_sklearn_study_real(api_config, model_name, dataset, scorer,
                                n_calls, n_suggestions):
    prob_type = data.get_problem_type(dataset)
    assume(scorer in data.METRICS_LOOKUP[prob_type])

    # Should really do a parametrized test, but a for loop is good enough
    for opt_name in sorted(CONFIG.keys()):
        opt_class = exp._get_opt_class(opt_name)
        # opt_root=None should work with built-in opt
        opt_kwargs = exp.load_optimizer_kwargs(opt_name, opt_root=None)

        exp.run_sklearn_study(opt_class, opt_kwargs, model_name, dataset,
                              scorer, n_calls, n_suggestions)
Example #5
def test_sklearn_model(model, dataset, metric, shuffle_seed, rs_seed):
    prob_type = data.get_problem_type(dataset)
    assume(metric in data.METRICS_LOOKUP[prob_type])

    test_prob = skf.SklearnModel(model,
                                 dataset,
                                 metric,
                                 shuffle_seed=shuffle_seed)

    api_config = test_prob.get_api_config()
    x_guess, = suggest_dict([], [],
                            api_config,
                            n_suggestions=1,
                            random=np.random.RandomState(rs_seed))

    loss = test_prob.evaluate(x_guess)
    assert np.isscalar(loss)
Example #6
def test_sklearn_model(model, dataset, metric, shuffle_seed, rs_seed):
    prob_type = data.get_problem_type(dataset)
    assume(metric in data.METRICS_LOOKUP[prob_type])

    test_prob = skf.SklearnModel(model,
                                 dataset,
                                 metric,
                                 shuffle_seed=shuffle_seed)

    api_config = test_prob.get_api_config()
    x_guess, = suggest_dict([], [],
                            api_config,
                            n_suggestions=1,
                            random=np.random.RandomState(rs_seed))

    loss = test_prob.evaluate(x_guess)

    assert isinstance(loss, tuple)
    assert all(isinstance(xx, float) for xx in loss)
    assert np.shape(loss) == np.shape(test_prob.objective_names)
Example #7
def test_run_study_bounds_fail(model_name, dataset, scorer, n_calls,
                               n_suggestions, seed):
    prob_type = data.get_problem_type(dataset)
    assume(scorer in data.METRICS_LOOKUP[prob_type])

    function_instance = SklearnModel(model_name, dataset, scorer)
    optimizer = OutOfBoundsOptimizer(function_instance.get_api_config(),
                                     random=np.random.RandomState(seed))
    optimizer.get_version()

    # pytest has tools for asserting that an exception is raised (e.g., pytest.raises), but this is ok for now
    bounds_fails = False
    try:
        exp.run_study(optimizer,
                      function_instance,
                      n_calls,
                      n_suggestions,
                      n_obj=len(function_instance.objective_names))
    except Exception as e:
        bounds_fails = str(e) == "Optimizer suggestion is out of range."
    assert bounds_fails
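
As the comment in the test notes, pytest's own exception tooling could replace the manual try/except bookkeeping. A sketch of that alternative using pytest.raises, assuming the same generated arguments as the test above; note the original compares the message with ==, whereas `match` does a regex search against str(e):

import pytest

def test_run_study_bounds_fail_alt(model_name, dataset, scorer, n_calls, n_suggestions, seed):
    prob_type = data.get_problem_type(dataset)
    assume(scorer in data.METRICS_LOOKUP[prob_type])

    function_instance = SklearnModel(model_name, dataset, scorer)
    optimizer = OutOfBoundsOptimizer(function_instance.get_api_config(),
                                     random=np.random.RandomState(seed))
    with pytest.raises(Exception, match="Optimizer suggestion is out of range"):
        exp.run_study(optimizer,
                      function_instance,
                      n_calls,
                      n_suggestions,
                      n_obj=len(function_instance.objective_names))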
Example #8
def test_dry_run(args, run_uuid, seed):
    args, opt_file_lookup = args

    assume(
        all(
            launcher._is_arg_safe(ss) for ss in args.values()
            if isinstance(ss, str)))

    uniqify = [
        CmdArgs.optimizer, CmdArgs.data, CmdArgs.classifier, CmdArgs.metric
    ]
    for uu in uniqify:
        assume(all(launcher._is_arg_safe(ss) for ss in args[uu]))
        args[uu] = list(set(args[uu]))

    m_set = set(args[CmdArgs.metric])
    m_lookup = {
        problem_type: sorted(m_set.intersection(mm))
        for problem_type, mm in data.METRICS_LOOKUP.items()
    }
    ok = all(
        len(m_lookup[data.get_problem_type(dd)]) > 0
        for dd in args[CmdArgs.data])
    assume(ok)

    fp_buf = StringIO()
    random = np.random.RandomState(seed)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        launcher.dry_run(args,
                         opt_file_lookup,
                         run_uuid,
                         fp_buf,
                         random=random)

    jobs = fp_buf.getvalue()
    assert jobs is not None
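
The dry run writes the generated job commands into the file-like buffer passed as fp_buf. A tiny follow-up sketch of how the captured text could be inspected; the exact file format is not shown here, so splitting on lines is only illustrative:

job_lines = [ll for ll in fp_buf.getvalue().splitlines() if ll.strip()]
print("dry run produced %d non-empty lines" % len(job_lines))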
Example #9
    def __init__(self, model, dataset, scorer, path):
        """Build class that wraps sklearn classifier/regressor CV score for use as an objective function surrogate.

        Parameters
        ----------
        model : str
            Which model to use; must be a key in the `MODELS_CLF` or `MODELS_REG` dict, depending on whether the
            dataset is classification or regression.
        dataset : str
            Which data set to use; must be a key in the `DATA_LOADERS` dict, or the name of a custom csv file.
        scorer : str
            Which sklearn scoring metric to use, in the `SCORERS_CLF` list or `SCORERS_REG` dict, depending on whether
            the dataset is classification or regression.
        path : str
            Root directory to look for all pickle files.
        """
        TestFunction.__init__(self)

        # Find the space class, we could consider putting this in pkl too
        problem_type = get_problem_type(dataset)
        assert problem_type in (ProblemType.clf, ProblemType.reg)
        _, _, self.api_config = MODELS_CLF[
            model] if problem_type == ProblemType.clf else MODELS_REG[model]
        self.space = JointSpace(self.api_config)

        # Load the pre-trained model
        fname = SklearnModel.test_case_str(model, dataset, scorer) + ".pkl"

        if isinstance(path, bytes):
            # This is for testability; we could use mock instead.
            self.model = pkl.loads(path)
        else:
            path = os.path.join(path, fname)  # pragma: io
            assert os.path.isfile(path), "Model file not found: %s" % path

            with absopen(path, "rb") as f:  # pragma: io
                self.model = pkl.load(f)  # pragma: io
        assert callable(getattr(self.model, "predict", None))
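
A minimal construction sketch for the bytes branch of this constructor (the class appears to be the `SklearnSurrogate` exercised via `skf` in the surrogate test further below). The model/dataset/metric names are illustrative placeholders that must be valid keys in the `MODELS_CLF`/`MODELS_REG`, `DATA_LOADERS`, and scorer lookups; this only exercises `__init__`, and the surrogate test below does the end-to-end evaluate check:

import pickle as pkl
import numpy as np
from sklearn.linear_model import LinearRegression

# Any fitted estimator with a predict() method satisfies the final assert in __init__
reg = LinearRegression().fit(np.random.rand(8, 2), np.random.rand(8, 3))
surrogate = skf.SklearnSurrogate("kNN", "boston", "mse", pkl.dumps(reg))  # bytes => unpickled directly, no file IO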
Example #10
def test_run_study_callback(model_name, dataset, scorer, n_calls,
                            n_suggestions, seed):
    prob_type = data.get_problem_type(dataset)
    assume(scorer in data.METRICS_LOOKUP[prob_type])

    function_instance = SklearnModel(model_name, dataset, scorer)
    optimizer = RandomOptimizer(function_instance.get_api_config(),
                                random=np.random.RandomState(seed))
    optimizer.get_version()
    n_obj = len(function_instance.objective_names)

    function_evals_cmin = np.zeros((n_calls, n_obj), dtype=float)
    iters_list = []

    def callback(f_min, iters):
        assert f_min.shape == (n_obj, )

        iters_list.append(iters)
        if iters == 0:
            assert np.all(f_min == np.inf)
            return

        function_evals_cmin[iters - 1, :] = f_min

    function_evals, _, _ = exp.run_study(optimizer,
                                         function_instance,
                                         n_calls,
                                         n_suggestions,
                                         n_obj=n_obj,
                                         callback=callback)

    assert iters_list == list(range(n_calls + 1))

    for ii in range(n_obj):
        for jj in range(n_calls):
            idx0, idx1 = np_util.argmin_2d(function_evals[:jj + 1, :, 0])
            assert function_evals_cmin[jj, ii] == function_evals[idx0, idx1,
                                                                 ii]
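
For reference, the cumulative-minimum bookkeeping that the loop above verifies can be written as a small standalone helper in plain numpy; this assumes `function_evals` has shape `(n_calls, n_suggestions, n_obj)` as the indexing in the test implies, and replaces `np_util.argmin_2d` with `np.unravel_index` over `np.argmin`:

import numpy as np

def cummin_over_calls(function_evals):
    # After each call, take the (call, suggestion) index with the lowest primary
    # objective so far and report all objectives at that index, as the test checks.
    n_calls, n_suggestions, n_obj = function_evals.shape
    out = np.zeros((n_calls, n_obj))
    for jj in range(n_calls):
        idx0, idx1 = np.unravel_index(np.argmin(function_evals[:jj + 1, :, 0]),
                                      (jj + 1, n_suggestions))
        out[jj, :] = function_evals[idx0, idx1, :]
    return out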
Example #11
def test_sklearn_model_surr(model, dataset, metric, model_seed, rs_seed):
    prob_type = data.get_problem_type(dataset)
    assume(metric in data.METRICS_LOOKUP[prob_type])

    test_prob = skf.SklearnModel(model, dataset, metric, shuffle_seed=0)
    api_config = test_prob.get_api_config()
    space = JointSpace(api_config)

    n_obj = len(test_prob.objective_names)

    n_suggestions = 20

    x_guess = suggest_dict([], [],
                           api_config,
                           n_suggestions=n_suggestions,
                           random=np.random.RandomState(rs_seed))
    x_guess_w = space.warp(x_guess)

    random = np.random.RandomState(model_seed)
    y = random.randn(n_suggestions, n_obj)

    reg = LinearRegression()
    reg.fit(x_guess_w, y)
    loss0 = reg.predict(x_guess_w)

    path = pkl.dumps(reg)
    del reg
    assert isinstance(path, bytes)

    test_prob_surr = skf.SklearnSurrogate(model, dataset, metric, path)
    loss = test_prob_surr.evaluate(x_guess[0])

    assert isinstance(loss, tuple)
    assert all(isinstance(xx, float) for xx in loss)
    assert np.shape(loss) == np.shape(test_prob.objective_names)

    assert np.allclose(loss0[0], np.array(loss))
Example #12
def experiment_main(opt_class, args=None):  # pragma: main
    """This is in effect the `main` routine for this experiment. However, it is called from the optimizer wrapper file
    so the class can be passed in. The optimizers are assumed to be outside the package, so the optimizer class can't
    be named from inside the main function without using hacky stuff like `eval`.
    """
    if args is None:
        description = "Run a study with one benchmark function and an optimizer"
        args = cmd.parse_args(cmd.experiment_parser(description))
    args[CmdArgs.opt_rev] = opt_class.get_version()

    run_uuid = uuid.UUID(args[CmdArgs.uuid])

    logging.captureWarnings(True)

    # Setup logging to both a file and stdout (if verbose is set to True)
    logger.setLevel(logging.INFO)  # Note this is the module-wide logger
    logfile = XRSerializer.logging_path(args[CmdArgs.db_root],
                                        args[CmdArgs.db], run_uuid)
    logger_file_handler = logging.FileHandler(logfile, mode="w")
    logger.addHandler(logger_file_handler)
    if args[CmdArgs.verbose]:
        logger.addHandler(logging.StreamHandler())

    warnings_logger = logging.getLogger("py.warnings")
    warnings_logger.addHandler(logger_file_handler)
    if args[CmdArgs.verbose]:
        warnings_logger.addHandler(logging.StreamHandler())

    logger.info("running: %s" % str(cmd.serializable_dict(args)))
    logger.info("cmd: %s" % cmd.cmd_str())

    assert (args[CmdArgs.metric]
            in METRICS_LOOKUP[get_problem_type(args[CmdArgs.data])]
            ), "reg/clf metrics can only be used on compatible dataset"

    # Setup random streams for computing the signature, must use same seed
    # across all runs to ensure signature is consistent. This seed is random:
    _setup_seeds(
        "7e9f2cabb0dd4f44bc10cf18e440b427")  # pragma: allowlist secret
    signature = get_objective_signature(args[CmdArgs.classifier],
                                        args[CmdArgs.data],
                                        args[CmdArgs.metric],
                                        data_root=args[CmdArgs.data_root])
    logger.info("computed signature: %s" % str(signature))

    opt_kwargs = load_optimizer_kwargs(args[CmdArgs.optimizer],
                                       args[CmdArgs.optimizer_root])

    # Setup the callback for intermediate logging
    if cc.BASELINE not in XRSerializer.get_derived_keys(args[CmdArgs.db_root],
                                                        db=args[CmdArgs.db]):
        warnings.warn("Baselines not found. Will not log intermediate scores.")
        callback = None
    else:
        test_case_str = SklearnModel.test_case_str(args[CmdArgs.classifier],
                                                   args[CmdArgs.data],
                                                   args[CmdArgs.metric])
        optimizer_str = str_join_safe(
            ARG_DELIM, (args[CmdArgs.optimizer], args[CmdArgs.opt_rev],
                        args[CmdArgs.rev]))

        baseline_ds, baselines_meta = XRSerializer.load_derived(
            args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.BASELINE)

        # Check the objective function signatures match in the baseline file
        sig_errs, _ = analyze_signature_pair({test_case_str: signature[1]},
                                             baselines_meta["signature"])
        logger.info("Signature errors:\n%s" % sig_errs.to_string())
        print(json.dumps({"exp sig errors": sig_errs.T.to_dict()}))

        def log_mean_score_json(evals, iters):
            assert evals.shape == (len(OBJECTIVE_NAMES), )
            assert not np.any(np.isnan(evals))

            log_msg = {
                cc.TEST_CASE: test_case_str,
                cc.METHOD: optimizer_str,
                cc.TRIAL: args[CmdArgs.uuid],
                cc.ITER: iters,
            }

            for idx, obj in enumerate(OBJECTIVE_NAMES):
                assert OBJECTIVE_NAMES[idx] == obj

                # Extract relevant rescaling info
                slice_ = {cc.TEST_CASE: test_case_str, OBJECTIVE: obj}
                best_opt = baseline_ds[cc.PERF_BEST].sel(
                    slice_, drop=True).values.item()
                base_clip_val = baseline_ds[cc.PERF_CLIP].sel(
                    slice_, drop=True).values.item()

                # Perform the same rescaling as found in experiment_analysis.compute_aggregates()
                score = linear_rescale(evals[idx],
                                       best_opt,
                                       base_clip_val,
                                       0.0,
                                       1.0,
                                       enforce_bounds=False)
                # Also, clip the score to [-1, 1] to limit the max influence of a single run on the final average
                score = np.clip(score, -1.0, 1.0)
                score = score.item()  # Make easiest for logging in JSON
                assert isinstance(score, float)

                # Note: This is not the raw score but the rescaled one!
                log_msg[obj] = score
            log_msg = json.dumps(log_msg)
            print(log_msg, flush=True)
            # One second safety delay to protect against subprocess stdout getting lost
            sleep(1)

        callback = log_mean_score_json

    # Now set the seeds for the actual experiment
    _setup_seeds(args[CmdArgs.uuid])

    # Now do the experiment
    logger.info("starting sklearn study %s %s %s %s %d %d" % (
        args[CmdArgs.optimizer],
        args[CmdArgs.classifier],
        args[CmdArgs.data],
        args[CmdArgs.metric],
        args[CmdArgs.n_calls],
        args[CmdArgs.n_suggest],
    ))
    logger.info("with data root: %s" % args[CmdArgs.data_root])
    function_evals, timing, suggest_log = run_sklearn_study(
        opt_class,
        opt_kwargs,
        args[CmdArgs.classifier],
        args[CmdArgs.data],
        args[CmdArgs.metric],
        args[CmdArgs.n_calls],
        args[CmdArgs.n_suggest],
        data_root=args[CmdArgs.data_root],
        callback=callback,
    )

    # Curate results into clean dataframes
    eval_ds = build_eval_ds(function_evals, OBJECTIVE_NAMES)
    time_ds = build_timing_ds(*timing)
    suggest_ds = build_suggest_ds(suggest_log)

    # setup meta:
    meta = {"args": cmd.serializable_dict(args), "signature": signature}
    logger.info("saving meta data: %s" % str(meta))

    # Now the final IO to export the results
    logger.info("saving results")
    XRSerializer.save(eval_ds,
                      meta,
                      args[CmdArgs.db_root],
                      db=args[CmdArgs.db],
                      key=cc.EVAL,
                      uuid_=run_uuid)

    logger.info("saving timing")
    XRSerializer.save(time_ds,
                      meta,
                      args[CmdArgs.db_root],
                      db=args[CmdArgs.db],
                      key=cc.TIME,
                      uuid_=run_uuid)

    logger.info("saving suggest log")
    XRSerializer.save(suggest_ds,
                      meta,
                      args[CmdArgs.db_root],
                      db=args[CmdArgs.db],
                      key=cc.SUGGEST_LOG,
                      uuid_=run_uuid)

    logger.info("done")
Example #13
def test_get_problem_type(dataset_name):
    problem_type = data.get_problem_type(dataset_name)
    assert problem_type is not None
Example #14
def gen_commands(args, opt_file_lookup, run_uuid):
    """Generator providing commands to launch processes for experiments.

    Parameters
    ----------
    args : dict(CmdArgs, [int, str])
        Arguments of options to pass to the experiments being launched. The keys correspond to the same arguments
        passed to this program.
    opt_file_lookup : dict(str, str)
        Mapping from method name to filename containing wrapper class for the method.
    run_uuid : uuid.UUID
        UUID for this launcher run. Needed to generate different experiment UUIDs on each call. This function is
        deterministic provided the same `run_uuid`.

    Yields
    ------
    iteration_key : (str, str, str, str, str)
        Tuple containing ``(trial, classifier, data, optimizer, metric)`` to index the experiment.
    full_cmd : tuple(str)
        Strings containing the command and arguments to run an experiment process. Join with whitespace or use
        :func:`.util.shell_join` to get a string with the executable command. The command omits ``--opt-root``, which
        means it will default to ``.`` when the command is executed. As such, the command assumes it is executed with
        ``--opt-root`` as the working directory.
    """
    args_to_pass_thru = [
        CmdArgs.n_calls, CmdArgs.n_suggest, CmdArgs.db_root, CmdArgs.db
    ]
    # This could be made simpler and avoid the if statement if we always passed data_root, even if no custom data is used.
    if args[CmdArgs.data_root] is not None:
        args_to_pass_thru.append(CmdArgs.data_root)

    # Possibilities to iterate over. Put them in sorted order just for good measure.
    c_list = strict_sorted(MODEL_NAMES if args[CmdArgs.classifier] is None else
                           args[CmdArgs.classifier])
    d_list = strict_sorted(DATA_LOADER_NAMES if args[CmdArgs.data] is None else
                           args[CmdArgs.data])
    o_list = strict_sorted(
        list(opt_file_lookup.keys()) + list(CONFIG.keys())
        if args[CmdArgs.optimizer] is None else args[CmdArgs.optimizer])
    assert all(((optimizer in opt_file_lookup) or (optimizer in CONFIG))
               for optimizer in o_list), "unknown optimizer in optimizer list"

    m_set = set(
        METRICS if args[CmdArgs.metric] is None else args[CmdArgs.metric])
    m_lookup = {
        problem_type: sorted(m_set.intersection(mm))
        for problem_type, mm in METRICS_LOOKUP.items()
    }
    assert all(
        (len(m_lookup[get_problem_type(data)]) > 0) for data in
        d_list), "At one metric needed for each problem type of data sets"

    G = product(range_str(args[CmdArgs.n_repeat]), c_list, d_list,
                o_list)  # iterate all combos
    for rep, classifier, data, optimizer in G:
        _, rep_str = rep
        problem_type = get_problem_type(data)
        for metric in m_lookup[problem_type]:
            # Get a reproducible string (conditioned on having the same run uuid) that should also never give
            # a duplicate (unless we force the same run uuid twice).
            iteration_key = (rep_str, classifier, data, optimizer, metric)
            iteration_id = str_join_safe(ARG_DELIM, iteration_key)
            sub_uuid = pyuuid.uuid5(run_uuid, iteration_id).hex

            # Build the argument list for subproc, passing some args thru
            cmd_args_pass_thru = [[CMD_STR[vv][0],
                                   arg_safe_str(args[vv])]
                                  for vv in args_to_pass_thru]
            # Technically, the optimizer is not actually needed here for non-built-in optimizers because it is already
            # specified via the entry point: optimizer_wrapper_file
            cmd_args = [
                [CMD_STR[CmdArgs.classifier][0],
                 arg_safe_str(classifier)],
                [CMD_STR[CmdArgs.data][0],
                 arg_safe_str(data)],
                [CMD_STR[CmdArgs.optimizer][0],
                 arg_safe_str(optimizer)],
                [CMD_STR[CmdArgs.uuid][0],
                 arg_safe_str(sub_uuid)],
                [CMD_STR[CmdArgs.metric][0],
                 arg_safe_str(metric)],
            ]
            cmd_args = tuple(sum(cmd_args + cmd_args_pass_thru, []))
            logger.info(" ".join(cmd_args))

            # The experiment command without the arguments
            if optimizer in CONFIG:  # => built in optimizer wrapper
                experiment_cmd = (EXPERIMENT_ENTRY, )
            else:
                optimizer_wrapper_file = opt_file_lookup[optimizer]
                assert optimizer_wrapper_file.endswith(
                    ".py"), "optimizer wrapper should a be .py file"
                experiment_cmd = (PY_INTERPRETER, optimizer_wrapper_file)

            # Check arg safe again; the odd-indexed elements (the argument values) need to be arg safe
            assert all((_is_arg_safe(ss) == (ii % 2 == 1))
                       for ii, ss in enumerate(cmd_args))

            full_cmd = experiment_cmd + cmd_args
            yield iteration_key, full_cmd
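
A short usage sketch following the Yields section of the docstring: iterate the generator and join each command tuple with whitespace (or the shell_join helper it mentions) to get an executable string. The `args` and `opt_file_lookup` values are assumed to come from the launcher's own argument parsing:

import uuid as pyuuid

run_uuid = pyuuid.uuid4()
for iteration_key, full_cmd in gen_commands(args, opt_file_lookup, run_uuid):
    # iteration_key indexes the experiment; full_cmd is a tuple of command-line tokens
    print(iteration_key, " ".join(full_cmd))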
Example #15
def test_get_objective_signature(model_name, dataset, scorer):
    prob_type = data.get_problem_type(dataset)
    assume(scorer in data.METRICS_LOOKUP[prob_type])

    exp.get_objective_signature(model_name, dataset, scorer)
Example #16
def experiment_main(opt_class, args=None):  # pragma: main
    """This is in effect the `main` routine for this experiment. However, it is called from the optimizer wrapper file
    so the class can be passed in. The optimizers are assumed to be outside the package, so the optimizer class can't
    be named from inside the main function without using hacky stuff like `eval`.
    """
    if args is None:
        description = "Run a study with one benchmark function and an optimizer"
        args = cmd.parse_args(cmd.experiment_parser(description))
    args[CmdArgs.opt_rev] = opt_class.get_version()

    run_uuid = uuid.UUID(args[CmdArgs.uuid])

    logging.captureWarnings(True)

    # Setup logging to both a file and stdout (if verbose is set to True)
    logger.setLevel(logging.INFO)  # Note this is the module-wide logger
    logfile = XRSerializer.logging_path(args[CmdArgs.db_root],
                                        args[CmdArgs.db], run_uuid)
    logger_file_handler = logging.FileHandler(logfile, mode="w")
    logger.addHandler(logger_file_handler)
    if args[CmdArgs.verbose]:
        logger.addHandler(logging.StreamHandler())

    warnings_logger = logging.getLogger("py.warnings")
    warnings_logger.addHandler(logger_file_handler)
    if args[CmdArgs.verbose]:
        warnings_logger.addHandler(logging.StreamHandler())

    logger.info("running: %s" % str(cmd.serializable_dict(args)))
    logger.info("cmd: %s" % cmd.cmd_str())

    assert (args[CmdArgs.metric]
            in METRICS_LOOKUP[get_problem_type(args[CmdArgs.data])]
            ), "reg/clf metrics can only be used on compatible dataset"

    # Setup random streams for computing the signature, must use same seed
    # across all runs to ensure signature is consistent. This seed is random:
    _setup_seeds(
        "7e9f2cabb0dd4f44bc10cf18e440b427")  # pragma: allowlist secret
    signature = get_objective_signature(args[CmdArgs.classifier],
                                        args[CmdArgs.data],
                                        args[CmdArgs.metric],
                                        data_root=args[CmdArgs.data_root])
    logger.info("computed signature: %s" % str(signature))

    opt_kwargs = load_optimizer_kwargs(args[CmdArgs.optimizer],
                                       args[CmdArgs.optimizer_root])

    # Now set the seeds for the actual experiment
    _setup_seeds(args[CmdArgs.uuid])

    # Now do the experiment
    logger.info("starting sklearn study %s %s %s %s %d %d" % (
        args[CmdArgs.optimizer],
        args[CmdArgs.classifier],
        args[CmdArgs.data],
        args[CmdArgs.metric],
        args[CmdArgs.n_calls],
        args[CmdArgs.n_suggest],
    ))
    logger.info("with data root: %s" % args[CmdArgs.data_root])
    function_evals, timing = run_sklearn_study(
        opt_class,
        opt_kwargs,
        args[CmdArgs.classifier],
        args[CmdArgs.data],
        args[CmdArgs.metric],
        args[CmdArgs.n_calls],
        args[CmdArgs.n_suggest],
        data_root=args[CmdArgs.data_root],
    )

    # Curate results into clean dataframes
    eval_ds = build_eval_ds(function_evals)
    time_ds = build_timing_ds(*timing)

    # setup meta:
    meta = {"args": cmd.serializable_dict(args), "signature": signature}
    logger.info("saving meta data: %s" % str(meta))

    # Now the final IO to export the results
    logger.info("saving results")
    XRSerializer.save(eval_ds,
                      meta,
                      args[CmdArgs.db_root],
                      db=args[CmdArgs.db],
                      key=cc.EVAL,
                      uuid_=run_uuid)

    logger.info("saving timing")
    XRSerializer.save(time_ds,
                      meta,
                      args[CmdArgs.db_root],
                      db=args[CmdArgs.db],
                      key=cc.TIME,
                      uuid_=run_uuid)

    logger.info("done")