Code example #1
0
    def run(self, limits, minimize, dynamic_optimizing, field_for_optimizing,
            project_path, scope, batches):
        """Run a benderopt optimization over the parameter ranges in *limits*.

        Parameters
        ----------
        limits : dict
            Maps parameter name -> {"low": ..., "high": ..., "step": ...}
            describing a uniform search space for that parameter.
        minimize, dynamic_optimizing, field_for_optimizing, project_path
            Forwarded unchanged to the base-class ``run`` (semantics are
            defined there — not visible from this block).
        scope : int
            Number of evaluations benderopt performs.
        batches
            Unused here; kept for interface compatibility with callers.

        Returns
        -------
        The best sample reported by ``benderopt.minimize``.
        """
        super().run(minimize, dynamic_optimizing, field_for_optimizing,
                    project_path)

        # Translate the plain {name: bounds} mapping into benderopt's
        # parameter-spec format.  (The original also built `init` and
        # `bounds` lists that were never used; they have been removed.)
        optimization_problem_parameters = []
        for key, search_bounds in limits.items():
            optimization_problem_parameters.append({
                "name": key,
                "category": "uniform",
                "search_space": {
                    "low": search_bounds["low"],
                    "high": search_bounds["high"],
                    "step": search_bounds["step"],
                },
            })
            # Remember the optimized field names on the instance; the
            # objective (`self.box`) presumably reads them — defined elsewhere.
            self.fields.append(key)

        best_sample = benderopt.minimize(
            f=self.box,
            optimization_problem_parameters=optimization_problem_parameters,
            number_of_evaluation=scope,
            debug=False)
        return best_sample
Code example #2
0
def benchmark_simple(
    function_to_optimize,
    optimization_problem,
    target,
    methods,
    number_of_evaluations,
    seeds,
    parallel=True,
):
    """Benchmark optimizer methods on an analytic function of several variables.

    For every (seed, evaluation budget, method) combination, run ``minimize``
    and record the Euclidean distance between the best sample found and the
    known optimum ``target``.  Results are aggregated over seeds.

    Parameters
    ----------
    function_to_optimize : callable
        Objective forwarded to ``minimize``.
    optimization_problem : list of dict
        benderopt parameter specs; their ``"name"`` entries must match the
        keys of ``target``.
    target : dict
        Known optimal value for each parameter name.
    methods, number_of_evaluations, seeds : iterable
        Grid of optimizer types, evaluation budgets, and RNG seeds to cross.
    parallel : bool, optional
        If true, fan trials out with joblib; otherwise run them sequentially
        with a tqdm progress bar.

    Returns
    -------
    dict
        ``{method: {n_evaluations: {"mean": ..., "std": ...}}}``.

    Raises
    ------
    ValueError
        If ``target`` and ``optimization_problem`` name different parameters.
    """
    # Validate with a real exception: the original used ``assert``, which is
    # stripped under ``python -O`` and would silently accept mismatched input.
    problem_names = {x["name"] for x in optimization_problem}
    if set(target.keys()) != problem_names:
        raise ValueError(
            "target keys must match optimization_problem parameter names")
    parameters = set(target.keys())

    results_tmp = {}
    trials = list(product(seeds, number_of_evaluations, methods))
    print("Number of trials:", len(trials))
    if parallel:
        best_samples = Parallel(n_jobs=-1, verbose=8)(delayed(minimize)(
            function_to_optimize,
            optimization_problem,
            optimizer_type=method,
            number_of_evaluation=number_of_evaluation,
            seed=seed,
        ) for seed, number_of_evaluation, method in trials)
    else:
        best_samples = [
            minimize(
                function_to_optimize,
                optimization_problem,
                optimizer_type=method,
                number_of_evaluation=number_of_evaluation,
                seed=seed,
            ) for seed, number_of_evaluation, method in tqdm.tqdm(trials)
        ]

    # Group the per-trial L2 errors by method, then by evaluation budget.
    for (seed, number_of_evaluation,
         method), best in zip(trials, best_samples):
        results_tmp.setdefault(method, {}).setdefault(
            number_of_evaluation, []).append(
                np.sqrt(
                    np.sum([(best[parameter] - target[parameter])**2
                            for parameter in parameters])))

    results = {
        method: {
            n: {
                "mean": np.mean(values),
                "std": np.std(values)
            }
            for n, values in ns.items()
        }
        for method, ns in results_tmp.items()
    }
    return results
Code example #3
0
File: base.py — Project: tchar/benderopt
def benchmark_value(
    function_to_optimize,
    optimization_problem,
    target,
    methods,
    number_of_evaluations,
    seeds,
    parallel=True,
):
    """Benchmark optimizer methods against a known target objective value.

    Runs ``minimize`` once per (seed, evaluation budget, method) combination
    and aggregates, per method and per budget, the absolute error
    ``|f(best) - target|`` across seeds (mean and standard deviation).
    """
    trials = list(product(seeds, number_of_evaluations, methods))
    print("Number of trials:", len(trials))

    if parallel is True:
        best_samples = Parallel(n_jobs=-1, verbose=8)(
            delayed(minimize)(
                function_to_optimize,
                optimization_problem,
                optimizer_type=method,
                number_of_evaluation=n_eval,
                seed=seed,
            )
            for seed, n_eval, method in trials
        )
    else:
        best_samples = []
        for seed, n_eval, method in tqdm.tqdm(trials):
            best_samples.append(minimize(
                function_to_optimize,
                optimization_problem,
                optimizer_type=method,
                number_of_evaluation=n_eval,
                seed=seed,
            ))

    # Collect the absolute error of each trial, keyed by method then budget.
    errors_by_method = {}
    for (seed, n_eval, method), best in zip(trials, best_samples):
        per_budget = errors_by_method.setdefault(method, {})
        per_budget.setdefault(n_eval, []).append(
            np.abs(function_to_optimize(**best) - target))

    return {
        method: {
            n_eval: {
                "mean": np.mean(values),
                "std": np.std(values),
            }
            for n_eval, values in per_budget.items()
        }
        for method, per_budget in errors_by_method.items()
    }