def load_experiments(uuid_list, db_root, dbid):  # pragma: io
    """Generator to load the results of the experiments.

    Parameters
    ----------
    uuid_list : list(uuid.UUID)
        List of UUIDs corresponding to experiments to load.
    db_root : str
        Root location for data store as requested by the serializer used.
    dbid : str
        Name of the data store as requested by the serializer used.

    Yields
    ------
    meta_data : (str, str, str)
        The `meta_data` contains a `tuple` of `str` with ``test_case, optimizer, uuid``.
    data : (:class:`xarray:xarray.Dataset`, :class:`xarray:xarray.Dataset`, :class:`xarray:xarray.Dataset`, list(float))
        The `data` contains a tuple of ``(perf_ds, time_ds, suggest_ds, sig)``. The `perf_ds` is a
        :class:`xarray:xarray.Dataset` containing the evaluation results with dimensions ``(ITER, SUGGEST)``, where
        each variable is an objective. The `time_ds` is a :class:`xarray:xarray.Dataset` containing the timing
        results of the form accepted by `summarize_time`. Its coordinates must be compatible with `perf_ds`. The
        `suggest_ds` is a :class:`xarray:xarray.Dataset` containing the inputs to the function evaluations, where
        each variable is a function input. Finally, `sig` contains the `test_case` signature and must be
        `list(float)`.
    """
    uuids_seen = set()
    for uuid_ in uuid_list:
        logger.info(uuid_.hex)

        # Load perf and timing data
        perf_ds, meta = XRSerializer.load(db_root, db=dbid, key=cc.EVAL, uuid_=uuid_)
        time_ds, meta_t = XRSerializer.load(db_root, db=dbid, key=cc.TIME, uuid_=uuid_)
        assert meta == meta_t, "meta data should match between time and eval files"
        suggest_ds, meta_t = XRSerializer.load(db_root, db=dbid, key=cc.SUGGEST_LOG, uuid_=uuid_)
        assert meta == meta_t, "meta data should match between suggest and eval files"

        # Get signature to pass out as well
        _, sig = meta["signature"]
        logger.info(meta)
        logger.info(sig)

        # Build the new indices for combined data; this could be put in a function for easier testing
        eval_args = unserializable_dict(meta["args"])

        # Unpack meta-data
        test_case = SklearnModel.test_case_str(
            eval_args[CmdArgs.classifier], eval_args[CmdArgs.data], eval_args[CmdArgs.metric]
        )
        optimizer = str_join_safe(
            ARG_DELIM, (eval_args[CmdArgs.optimizer], eval_args[CmdArgs.opt_rev], eval_args[CmdArgs.rev])
        )
        args_uuid = eval_args[CmdArgs.uuid]

        # Check UUID sanity
        assert isinstance(args_uuid, str)
        assert args_uuid == uuid_.hex, "UUID meta-data does not match filename"
        assert args_uuid not in uuids_seen, "uuids being reused between studies"
        uuids_seen.add(args_uuid)

        # Return key -> data so this generator can be iterated over in a dict-like manner
        meta_data = (test_case, optimizer, args_uuid)
        data = (perf_ds, time_ds, suggest_ds, sig)
        yield meta_data, data
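# Illustrative sketch (not part of the original module): the generator above yields
# ``(meta_data, data)`` pairs, so it can be consumed like ``dict.items()``. The helper name
# `_example_consume_experiments` is made up here; `uuid_list`, `db_root`, and `dbid` are
# assumed to come from a caller such as `main()` below.
def _example_consume_experiments(uuid_list, db_root, dbid):  # pragma: no cover
    # Collect all experiments into a dict keyed by (test_case, optimizer, uuid).
    all_data = dict(load_experiments(uuid_list, db_root, dbid))
    for (test_case, optimizer, uuid_hex), (perf_ds, time_ds, suggest_ds, sig) in all_data.items():
        # Each perf_ds has dimensions (ITER, SUGGEST) with one variable per objective.
        print(test_case, optimizer, uuid_hex, list(perf_ds.data_vars))
    return all_data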
def test_uuid_to_fname(uu):
    ff = XRSerializer._uuid_to_fname(uu)
    uu_ = XRSerializer._fname_to_uuid(ff)
    assert uu == uu_

    ff_ = XRSerializer._uuid_to_fname(uu_)
    assert ff == ff_
def main(): """See README for instructions on calling aggregate. """ description = "Aggregate study results across functions and optimizers" args = parse_args(agg_parser(description)) logger.setLevel(logging.INFO) # Note this is the module-wide logger if args[CmdArgs.verbose]: logger.addHandler(logging.StreamHandler()) # Get list of UUIDs uuid_list = XRSerializer.get_uuids(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.EVAL) uuid_list_ = XRSerializer.get_uuids(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.TIME) assert uuid_list == uuid_list_, "UUID list does not match between time and eval results" uuid_list_ = XRSerializer.get_uuids(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.SUGGEST_LOG) assert uuid_list == uuid_list_, "UUID list does not match between suggest log and eval results" # Get iterator of all experiment data dumps, load in and process, and concat data_G = load_experiments(uuid_list, args[CmdArgs.db_root], args[CmdArgs.db]) all_perf, all_time, all_suggest, all_sigs = concat_experiments(data_G, ravel=args[CmdArgs.ravel]) # Check the concat signatures make are coherent sig_errs, signatures_median = analyze_signatures(all_sigs) logger.info("Signature errors:\n%s" % sig_errs.to_string()) print(json.dumps({"exp-agg sig errors": sig_errs.T.to_dict()})) # Dump and save it all out logger.info("saving") meta = {"args": serializable_dict(args), "signature": signatures_median} XRSerializer.save_derived(all_perf, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=EVAL_RESULTS) XRSerializer.save_derived(all_time, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=TIME_RESULTS) for test_case, ds in all_suggest.items(): XRSerializer.save_derived(ds, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=test_case) logger.info("done")
def do_baseline(args):  # pragma: io
    """Alternate entry into the program without calling the actual main.
    """
    # Load in the eval data and sanity check
    perf_da, meta = XRSerializer.load_derived(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.EVAL_RESULTS)
    perf_da = xru.only_dataarray(perf_da)
    logger.info("Meta data from source file: %s" % str(meta["args"]))

    baseline_ds = compute_baseline(perf_da)

    # Now dump the results
    XRSerializer.save_derived(baseline_ds, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.BASELINE)
def real_run(args, opt_file_lookup, run_uuid, timeout=None):  # pragma: io
    """Run a sequence of independent experiments to fully run the benchmark.

    This uses `subprocess` to launch a separate process (in serial) for each experiment.

    Parameters
    ----------
    args : dict(CmdArgs, [int, str])
        Arguments of options to pass to the experiments being launched. The keys correspond to the same arguments
        passed to this program.
    opt_file_lookup : dict(str, str)
        Mapping from method name to filename containing the wrapper class for the method.
    run_uuid : uuid.UUID
        UUID for this launcher run. Needed to generate different experiment UUIDs on each call. This function is
        deterministic provided the same `run_uuid`.
    timeout : int
        Max seconds per experiment.
    """
    args[CmdArgs.db] = XRSerializer.init_db(args[CmdArgs.db_root], db=args[CmdArgs.db], keys=EXP_VARS, exist_ok=True)
    logger.info("Supply --db %s to append to this experiment or reproduce jobs file." % args[CmdArgs.db])

    # Get and run the commands in a sub-process
    counter = 0
    G = gen_commands(args, opt_file_lookup, run_uuid)
    for _, full_cmd in G:
        try:
            status = call(full_cmd, shell=False, cwd=args[CmdArgs.optimizer_root], timeout=timeout)
            if status != 0:
                raise ChildProcessError("status code %d returned from:\n%s" % (status, " ".join(full_cmd)))
        except TimeoutExpired:
            logger.info(f"Experiment timeout after {timeout} seconds.")
            print(json.dumps({"experiment_timeout_exception": " ".join(full_cmd)}))
        counter += 1
    logger.info(f"Benchmark script ran {counter} studies successfully.")
def dry_run(args, opt_file_lookup, run_uuid, fp, random=np_random):
    """Write to a buffer a description of the commands for running all experiments.

    This function is almost pure by writing to a buffer, but it could be switched to a generator.

    Parameters
    ----------
    args : dict(CmdArgs, [int, str])
        Arguments of options to pass to the experiments being launched. The keys correspond to the same arguments
        passed to this program.
    opt_file_lookup : dict(str, str)
        Mapping from method name to filename containing the wrapper class for the method.
    run_uuid : uuid.UUID
        UUID for this launcher run. Needed to generate different experiment UUIDs on each call. This function is
        deterministic provided the same `run_uuid`.
    fp : writable buffer
        File handle to write out the sequence of commands (broken into jobs, one per line) needed to execute all the
        experiments (possibly each job in parallel).
    random : RandomState
        Random stream to use for reproducibility.
    """
    assert args[CmdArgs.n_jobs] > 0, "Must have non-zero jobs for dry run"

    # Taking in a file pointer since then we can test without an actual file. Could also build a generator that
    # returns lines to write.
    manual_setup_info = XRSerializer.init_db_manual(args[CmdArgs.db_root], db=args[CmdArgs.db], keys=EXP_VARS)
    warnings.warn(manual_setup_info, UserWarning)

    # Get the commands
    dry_run_commands = {}
    G = gen_commands(args, opt_file_lookup, run_uuid)
    for (_, _, _, optimizer, _), full_cmd in G:
        cmd_str = shell_join(full_cmd)
        dry_run_commands.setdefault(optimizer, []).append(cmd_str)

    # Make sure we never have any empty jobs, which would be a waste
    n_commands = sum(len(v) for v in dry_run_commands.values())
    n_jobs = min(args[CmdArgs.n_jobs], n_commands)

    # Would probably also work with pyrandom, but only np random has been tested so far
    subcommands = strat_split(list(dry_run_commands.values()), n_jobs, random=random)
    # Make sure we have the same commands overall; delete once we trust strat_split
    assert sorted(np.concatenate(subcommands)) == sorted(sum(list(dry_run_commands.values()), []))

    job_suffix = run_uuid.hex[:UUID_JOB_CHARS]

    # Include comments as reproducibility lines
    args_str = serializable_dict(args)
    fp.write("# running: %s\n" % str(args_str))
    fp.write("# cmd: %s\n" % cmd.cmd_str())
    for ii, ii_str in range_str(n_jobs):
        assert len(subcommands[ii]) > 0
        fp.write("job_%s_%s %s\n" % (job_suffix, ii_str, " && ".join(subcommands[ii])))
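# Illustrative sketch (not part of the original module): one way the dry-run output written
# above could be consumed. It assumes the format produced by `dry_run`: comment lines start
# with "#", and each job line is "job_<suffix>_<idx> <cmd1> && <cmd2> && ...". The helper
# name and the serial execution strategy are assumptions for illustration only.
def _example_run_jobs_file(jobs_path):  # pragma: no cover
    import subprocess

    with open(jobs_path, "r") as fp:
        for line in fp:
            line = line.strip()
            if not line or line.startswith("#"):
                continue  # skip the reproducibility comment lines
            job_name, _, joined_cmd = line.partition(" ")
            # Each job is a "&&"-joined chain of experiment commands; here it runs in a shell,
            # but in practice the jobs are intended to be dispatched in parallel instead.
            subprocess.run(joined_cmd, shell=True, check=True)
            print("finished", job_name)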
def main(): """See README for instructions on calling db_init. """ description = "Initialize the directories for running the experiments" args = cmd.parse_args(cmd.general_parser(description)) assert not args[ CmdArgs.dry_run], "Dry run doesn't make any sense when building dirs" logger.setLevel(logging.INFO) # Note this is the module-wide logger if args[CmdArgs.verbose]: logger.addHandler(logging.StreamHandler()) XRSerializer.init_db(args[CmdArgs.db_root], db=args[CmdArgs.db], keys=EXP_VARS, exist_ok=EXIST_OK) logger.info("done")
def do_baseline(args):  # pragma: io
    """Alternate entry into the program without calling the actual main.
    """
    # Load in the eval data and sanity check
    perf_ds, meta = XRSerializer.load_derived(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.EVAL_RESULTS)
    logger.info("Meta data from source file: %s" % str(meta["args"]))

    D = OrderedDict()
    for kk in perf_ds:
        perf_da = perf_ds[kk]
        D[(kk,)] = compute_baseline(perf_da)
    baseline_ds = ds_concat(D, dims=(cc.OBJECTIVE,))

    # Keep in same order for cleanliness
    baseline_ds = baseline_ds.sel({cc.OBJECTIVE: list(perf_ds)})
    assert list(perf_ds) == baseline_ds.coords[cc.OBJECTIVE].values.tolist()
    # Could optionally remove this once we think things have enough tests
    for kk in D:
        assert baseline_ds.sel({cc.OBJECTIVE: kk[0]}, drop=True).identical(D[kk])

    # Now dump the results
    XRSerializer.save_derived(baseline_ds, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.BASELINE)
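# Illustrative sketch (not part of the original module): the per-objective stacking done above
# with bayesmark's ds_concat helper, reproduced here with plain xarray so the pattern is easy to
# see. The dummy Datasets, variable name "best", and dimension name "objective" are made up.
def _example_concat_per_objective():  # pragma: no cover
    import numpy as np
    import pandas as pd
    import xarray as xr

    # Two per-objective Datasets with identical dims/coords, standing in for compute_baseline outputs.
    iters = np.arange(5)
    ds_a = xr.Dataset({"best": ("ITER", np.random.rand(5))}, coords={"ITER": iters})
    ds_b = xr.Dataset({"best": ("ITER", np.random.rand(5))}, coords={"ITER": iters})

    # Stack them along a new "objective" dimension, mirroring ds_concat(D, dims=(cc.OBJECTIVE,)).
    combined = xr.concat([ds_a, ds_b], dim=pd.Index(["obj_0", "obj_1"], name="objective"))

    # Selecting one objective back out recovers the original Dataset, as the asserts in do_baseline check.
    assert combined.sel(objective="obj_0", drop=True).identical(ds_a)
    return combined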
def main(): """See README for instructions on calling analysis. """ description = "Analyze results from aggregated studies" args = parse_args(general_parser(description)) logger.setLevel(logging.INFO) # Note this is the module-wide logger if args[CmdArgs.verbose]: logger.addHandler(logging.StreamHandler()) # Load in the eval data and sanity check perf_da, meta = XRSerializer.load_derived(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.EVAL_RESULTS) perf_da = xru.only_dataarray(perf_da) logger.info("Meta data from source file: %s" % str(meta["args"])) # Check if there is baselines file, other make one if cc.BASELINE not in XRSerializer.get_derived_keys(args[CmdArgs.db_root], db=args[CmdArgs.db]): warnings.warn("Baselines not found. Need to construct baseline.") do_baseline(args) # Load in baseline scores data and sanity check (including compatibility with eval data) baseline_ds, meta_ref = XRSerializer.load_derived(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.BASELINE) logger.info("baseline data from source ref file: %s" % str(meta_ref["args"])) # Check test case signatures match between eval data and baseline data sig_errs, signatures = analyze_signature_pair(meta["signature"], meta_ref["signature"]) logger.info("Signature errors:\n%s" % sig_errs.to_string()) # Do the actual computation agg_result, summary = compute_aggregates(perf_da, baseline_ds) final_score = summary[PERF_MED][{ITER: -1}] logger.info("median score @ %d:\n%s" % (summary.sizes[ITER], xru.da_to_string(final_score))) final_score = summary[PERF_MEAN][{ITER: -1}] logger.info("mean score @ %d:\n%s" % (summary.sizes[ITER], xru.da_to_string(final_score))) final_score = summary[NORMED_MEAN][{ITER: -1}] logger.info("normed mean score @ %d:\n%s" % (summary.sizes[ITER], xru.da_to_string(final_score))) # Now saving results meta = {"args": serializable_dict(args), "signature": signatures} XRSerializer.save_derived(agg_result, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.PERF_RESULTS) XRSerializer.save_derived(summary, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.MEAN_SCORE) logger.info("done")
def partial_report(args: argparse.Namespace) -> None:
    db_root = os.path.abspath("runs")
    summary, _ = XRSerializer.load_derived(db_root, db=_DB, key=cc.MEAN_SCORE)

    # Following the bayesmark way of constructing the leaderboard.
    # https://github.com/uber/bayesmark/blob/8c420e935718f0d6867153b781e58943ecaf2338/bayesmark/experiment_analysis.py#L324-L328
    scores = summary["mean"].sel({"objective": cc.VISIBLE_TO_OPT}, drop=True)[{"iter": -1}]
    leaderboard = (100 * (1 - scores)).to_series().to_dict()
    sorted_lb = {k: v for k, v in sorted(leaderboard.items(), key=lambda i: i[1], reverse=True)}

    filename = f"{args.dataset}-{args.model}-partial-report.json"
    with open(os.path.join("partial", filename), "w") as file:
        json.dump(sorted_lb, file)
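# Illustrative sketch (not part of the original module): the leaderboard entry above is
# 100 * (1 - normalized_score), so lower normalized scores give higher leaderboard values,
# and the dict is then sorted by value in descending order, as in partial_report. The
# optimizer names and normalized scores below are made up for illustration.
def _example_leaderboard():  # pragma: no cover
    normalized_scores = {"opt_a": 0.25, "opt_b": 0.5, "opt_c": 1.0}
    leaderboard = {name: 100 * (1 - s) for name, s in normalized_scores.items()}
    # Sort by leaderboard value, largest first.
    sorted_lb = dict(sorted(leaderboard.items(), key=lambda i: i[1], reverse=True))
    return sorted_lb  # {'opt_a': 75.0, 'opt_b': 50.0, 'opt_c': 0.0}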
def make_plots(args: argparse.Namespace) -> None:
    # https://github.com/uber/bayesmark/blob/master/notebooks/plot_test_case.ipynb
    db_root = os.path.abspath("runs")
    summary, _ = XRSerializer.load_derived(db_root, db=_DB, key=cc.PERF_RESULTS)
    plot_warmup = json.loads(args.plot_warmup)

    fig = plt.figure(figsize=(18, 8))
    gs = fig.add_gridspec(1, 2)
    axs = gs.subplots()

    for benchmark in summary.coords["function"].values:
        for metric, ax in zip(["mean", "median"], axs):
            make_plot(summary, ax, benchmark, metric, plot_warmup)

        handles, labels = ax.get_legend_handles_labels()
        fig.legend(handles, labels)
        fig.suptitle(benchmark)

    fig.savefig(os.path.join("plots", f"optuna-{args.dataset}-{args.model}-summary.png"))
def real_run(args, opt_file_lookup, run_uuid):  # pragma: io
    """Run a sequence of independent experiments to fully run the benchmark.

    This uses `subprocess` to launch a separate process (in serial) for each experiment.

    Parameters
    ----------
    args : dict(CmdArgs, [int, str])
        Arguments of options to pass to the experiments being launched. The keys correspond to the same arguments
        passed to this program.
    opt_file_lookup : dict(str, str)
        Mapping from method name to filename containing the wrapper class for the method.
    run_uuid : uuid.UUID
        UUID for this launcher run. Needed to generate different experiment UUIDs on each call. This function is
        deterministic provided the same `run_uuid`.
    """
    args[CmdArgs.db] = XRSerializer.init_db(args[CmdArgs.db_root], db=args[CmdArgs.db], keys=EXP_VARS, exist_ok=True)
    logger.info("Supply --db %s to append to this experiment or reproduce jobs file." % args[CmdArgs.db])

    # Get and run the commands in a sub-process
    ran, failed = 0, 0
    G = gen_commands(args, opt_file_lookup, run_uuid)
    for _, full_cmd in G:
        status = call(full_cmd, shell=False, cwd=args[CmdArgs.optimizer_root])
        ran += 1
        if status != 0:
            failed += 1
            warnings.warn("status code %d returned from:\n%s" % (status, " ".join(full_cmd)), RuntimeWarning)
    logger.info("%d failures of benchmark script after %d studies." % (failed, ran))
def experiment_main(opt_class, args=None):  # pragma: main
    """This is in effect the `main` routine for this experiment. However, it is called from the optimizer wrapper
    file so the class can be passed in. The optimizers are assumed to be outside the package, so the optimizer class
    can't be named from inside the main function without using hacky stuff like `eval`.
    """
    if args is None:
        description = "Run a study with one benchmark function and an optimizer"
        args = cmd.parse_args(cmd.experiment_parser(description))
    args[CmdArgs.opt_rev] = opt_class.get_version()

    run_uuid = uuid.UUID(args[CmdArgs.uuid])

    logging.captureWarnings(True)

    # Setup logging to both a file and stdout (if verbose is set to True)
    logger.setLevel(logging.INFO)  # Note this is the module-wide logger
    logfile = XRSerializer.logging_path(args[CmdArgs.db_root], args[CmdArgs.db], run_uuid)
    logger_file_handler = logging.FileHandler(logfile, mode="w")
    logger.addHandler(logger_file_handler)
    if args[CmdArgs.verbose]:
        logger.addHandler(logging.StreamHandler())

    warnings_logger = logging.getLogger("py.warnings")
    warnings_logger.addHandler(logger_file_handler)
    if args[CmdArgs.verbose]:
        warnings_logger.addHandler(logging.StreamHandler())

    logger.info("running: %s" % str(cmd.serializable_dict(args)))
    logger.info("cmd: %s" % cmd.cmd_str())

    assert (
        args[CmdArgs.metric] in METRICS_LOOKUP[get_problem_type(args[CmdArgs.data])]
    ), "reg/clf metrics can only be used on compatible dataset"

    # Setup random streams for computing the signature; must use the same seed across all runs to ensure the
    # signature is consistent. This seed is random:
    _setup_seeds("7e9f2cabb0dd4f44bc10cf18e440b427")  # pragma: allowlist secret
    signature = get_objective_signature(
        args[CmdArgs.classifier], args[CmdArgs.data], args[CmdArgs.metric], data_root=args[CmdArgs.data_root]
    )
    logger.info("computed signature: %s" % str(signature))

    opt_kwargs = load_optimizer_kwargs(args[CmdArgs.optimizer], args[CmdArgs.optimizer_root])

    # Setup the callback for intermediate logging
    if cc.BASELINE not in XRSerializer.get_derived_keys(args[CmdArgs.db_root], db=args[CmdArgs.db]):
        warnings.warn("Baselines not found. Will not log intermediate scores.")
        callback = None
    else:
        test_case_str = SklearnModel.test_case_str(args[CmdArgs.classifier], args[CmdArgs.data], args[CmdArgs.metric])
        optimizer_str = str_join_safe(
            ARG_DELIM, (args[CmdArgs.optimizer], args[CmdArgs.opt_rev], args[CmdArgs.rev])
        )

        baseline_ds, baselines_meta = XRSerializer.load_derived(
            args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.BASELINE
        )

        # Check the objective function signatures match in the baseline file
        sig_errs, _ = analyze_signature_pair({test_case_str: signature[1]}, baselines_meta["signature"])
        logger.info("Signature errors:\n%s" % sig_errs.to_string())
        print(json.dumps({"exp sig errors": sig_errs.T.to_dict()}))

        def log_mean_score_json(evals, iters):
            assert evals.shape == (len(OBJECTIVE_NAMES),)
            assert not np.any(np.isnan(evals))

            log_msg = {
                cc.TEST_CASE: test_case_str,
                cc.METHOD: optimizer_str,
                cc.TRIAL: args[CmdArgs.uuid],
                cc.ITER: iters,
            }
            for idx, obj in enumerate(OBJECTIVE_NAMES):
                assert OBJECTIVE_NAMES[idx] == obj

                # Extract relevant rescaling info
                slice_ = {cc.TEST_CASE: test_case_str, OBJECTIVE: obj}
                best_opt = baseline_ds[cc.PERF_BEST].sel(slice_, drop=True).values.item()
                base_clip_val = baseline_ds[cc.PERF_CLIP].sel(slice_, drop=True).values.item()

                # Perform the same rescaling as found in experiment_analysis.compute_aggregates()
                score = linear_rescale(evals[idx], best_opt, base_clip_val, 0.0, 1.0, enforce_bounds=False)
                # Also, clip the score from below at -1 to limit the max influence of a single run on the final average
                score = np.clip(score, -1.0, 1.0)
                score = score.item()  # Make easiest for logging in JSON
                assert isinstance(score, float)

                # Note: This is not the raw score but the rescaled one!
                log_msg[obj] = score

            log_msg = json.dumps(log_msg)
            print(log_msg, flush=True)
            # One second safety delay to protect against subprocess stdout getting lost
            sleep(1)

        callback = log_mean_score_json

    # Now set the seeds for the actual experiment
    _setup_seeds(args[CmdArgs.uuid])

    # Now do the experiment
    logger.info(
        "starting sklearn study %s %s %s %s %d %d"
        % (
            args[CmdArgs.optimizer],
            args[CmdArgs.classifier],
            args[CmdArgs.data],
            args[CmdArgs.metric],
            args[CmdArgs.n_calls],
            args[CmdArgs.n_suggest],
        )
    )
    logger.info("with data root: %s" % args[CmdArgs.data_root])
    function_evals, timing, suggest_log = run_sklearn_study(
        opt_class,
        opt_kwargs,
        args[CmdArgs.classifier],
        args[CmdArgs.data],
        args[CmdArgs.metric],
        args[CmdArgs.n_calls],
        args[CmdArgs.n_suggest],
        data_root=args[CmdArgs.data_root],
        callback=callback,
    )

    # Curate results into clean dataframes
    eval_ds = build_eval_ds(function_evals, OBJECTIVE_NAMES)
    time_ds = build_timing_ds(*timing)
    suggest_ds = build_suggest_ds(suggest_log)

    # setup meta:
    meta = {"args": cmd.serializable_dict(args), "signature": signature}
    logger.info("saving meta data: %s" % str(meta))

    # Now the final IO to export the results
    logger.info("saving results")
    XRSerializer.save(eval_ds, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.EVAL, uuid_=run_uuid)

    logger.info("saving timing")
    XRSerializer.save(time_ds, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.TIME, uuid_=run_uuid)

    logger.info("saving suggest log")
    XRSerializer.save(suggest_ds, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.SUGGEST_LOG, uuid_=run_uuid)

    logger.info("done")
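# Illustrative sketch (not part of the original module) of the rescaling performed in
# log_mean_score_json above, assuming linear_rescale is a standard affine map sending
# best_opt -> 0.0 and base_clip_val -> 1.0. The helper name and numeric values are made up.
def _example_rescale_eval(raw_eval, best_opt, base_clip_val):  # pragma: no cover
    import numpy as np

    # Affine map: 0.0 at the estimated optimum, 1.0 at the baseline clip value.
    score = (raw_eval - best_opt) / (base_clip_val - best_opt)
    # Clip to [-1, 1] so a single extreme run cannot dominate the final average.
    return float(np.clip(score, -1.0, 1.0))


# e.g. an evaluation halfway between the optimum and the clip value rescales to 0.5
assert _example_rescale_eval(2.0, best_opt=1.0, base_clip_val=3.0) == 0.5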
def test_init_db_manual(db_root, keys, db):
    XRSerializer.init_db_manual(db_root, keys, db)
def test_validate(db_root, keys, db):
    XRSerializer._validate(db_root, keys, db)
def test_key_to_fname(key):
    ff = XRSerializer._key_to_fname(key)
    kk = XRSerializer._fname_to_key(ff)
    assert key == kk
def experiment_main(opt_class, args=None):  # pragma: main
    """This is in effect the `main` routine for this experiment. However, it is called from the optimizer wrapper
    file so the class can be passed in. The optimizers are assumed to be outside the package, so the optimizer class
    can't be named from inside the main function without using hacky stuff like `eval`.
    """
    if args is None:
        description = "Run a study with one benchmark function and an optimizer"
        args = cmd.parse_args(cmd.experiment_parser(description))
    args[CmdArgs.opt_rev] = opt_class.get_version()

    run_uuid = uuid.UUID(args[CmdArgs.uuid])

    logging.captureWarnings(True)

    # Setup logging to both a file and stdout (if verbose is set to True)
    logger.setLevel(logging.INFO)  # Note this is the module-wide logger
    logfile = XRSerializer.logging_path(args[CmdArgs.db_root], args[CmdArgs.db], run_uuid)
    logger_file_handler = logging.FileHandler(logfile, mode="w")
    logger.addHandler(logger_file_handler)
    if args[CmdArgs.verbose]:
        logger.addHandler(logging.StreamHandler())

    warnings_logger = logging.getLogger("py.warnings")
    warnings_logger.addHandler(logger_file_handler)
    if args[CmdArgs.verbose]:
        warnings_logger.addHandler(logging.StreamHandler())

    logger.info("running: %s" % str(cmd.serializable_dict(args)))
    logger.info("cmd: %s" % cmd.cmd_str())

    assert (
        args[CmdArgs.metric] in METRICS_LOOKUP[get_problem_type(args[CmdArgs.data])]
    ), "reg/clf metrics can only be used on compatible dataset"

    # Setup random streams for computing the signature; must use the same seed across all runs to ensure the
    # signature is consistent. This seed is random:
    _setup_seeds("7e9f2cabb0dd4f44bc10cf18e440b427")  # pragma: allowlist secret
    signature = get_objective_signature(
        args[CmdArgs.classifier], args[CmdArgs.data], args[CmdArgs.metric], data_root=args[CmdArgs.data_root]
    )
    logger.info("computed signature: %s" % str(signature))

    opt_kwargs = load_optimizer_kwargs(args[CmdArgs.optimizer], args[CmdArgs.optimizer_root])

    # Now set the seeds for the actual experiment
    _setup_seeds(args[CmdArgs.uuid])

    # Now do the experiment
    logger.info(
        "starting sklearn study %s %s %s %s %d %d"
        % (
            args[CmdArgs.optimizer],
            args[CmdArgs.classifier],
            args[CmdArgs.data],
            args[CmdArgs.metric],
            args[CmdArgs.n_calls],
            args[CmdArgs.n_suggest],
        )
    )
    logger.info("with data root: %s" % args[CmdArgs.data_root])
    function_evals, timing = run_sklearn_study(
        opt_class,
        opt_kwargs,
        args[CmdArgs.classifier],
        args[CmdArgs.data],
        args[CmdArgs.metric],
        args[CmdArgs.n_calls],
        args[CmdArgs.n_suggest],
        data_root=args[CmdArgs.data_root],
    )

    # Curate results into clean dataframes
    eval_ds = build_eval_ds(function_evals)
    time_ds = build_timing_ds(*timing)

    # setup meta:
    meta = {"args": cmd.serializable_dict(args), "signature": signature}
    logger.info("saving meta data: %s" % str(meta))

    # Now the final IO to export the results
    logger.info("saving results")
    XRSerializer.save(eval_ds, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.EVAL, uuid_=run_uuid)

    logger.info("saving timing")
    XRSerializer.save(time_ds, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.TIME, uuid_=run_uuid)

    logger.info("done")