Ejemplo n.º 1
0
    def get_tuned_config(self, scenario: ASlibScenario,
                         runcount_limit: int = 42,
                         wallclock_limit: int = 300,
                         autofolio_config: dict = None,
                         seed: int = 42):
        '''
            uses SMAC3 to determine a well-performing configuration in the configuration space self.cs on the given scenario

            Arguments
            ---------
            scenario: ASlibScenario
                ASlib Scenario at hand
            runcount_limit: int
                runcount_limit for SMAC scenario
                (overwritten by autofolio_config)
            wallclock_limit: int
                wallclock limit in sec for SMAC scenario
                (overwritten by autofolio_config)
            autofolio_config: dict, or None
                An optional dictionary of configuration options
            seed: int
                random seed for SMAC

            Returns
            -------
            Configuration
                best incumbent configuration found by SMAC
        '''
        # BUG FIX: the default used to be the mutable `dict()`, which is shared
        # across calls; use the None sentinel (this also makes the documented
        # "dict, or None" contract actually work).
        if autofolio_config is None:
            autofolio_config = {}

        # Entries in autofolio_config take precedence over the keyword defaults.
        wallclock_limit = autofolio_config.get("wallclock_limit", wallclock_limit)
        runcount_limit = autofolio_config.get("runcount_limit", runcount_limit)

        # Bind the scenario so SMAC only has to pass a configuration.
        taf = functools.partial(self.called_by_smac, scenario=scenario)

        # Cross-validation folds serve as SMAC "instances" (1..max_fold).
        max_fold = scenario.cv_data.max().max()
        max_fold = int(max_fold)

        ac_scenario = Scenario({"run_obj": "quality",  # we optimize quality
                                "runcount-limit": runcount_limit,
                                "cs": self.cs,  # configuration space
                                "deterministic": "true",
                                "instances": [[str(i)] for i in range(1, max_fold + 1)],
                                "wallclock-limit": wallclock_limit,
                                # empty string disables SMAC's output directory
                                "output-dir": autofolio_config.get("output-dir") or ""
                                })

        # necessary to use stats options related to scenario information
        AC_Stats.scenario = ac_scenario

        # Optimize
        self.logger.info(
            ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
        self.logger.info("Start Configuration")
        self.logger.info(
            ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
        smac = SMAC(scenario=ac_scenario, tae_runner=taf,
                    rng=np.random.RandomState(seed))
        incumbent = smac.optimize()

        self.logger.info("Final Incumbent: %s" % (incumbent))

        return incumbent
    def perform_optimization(self, optimization_time_budget):
        """Run one SMAC4HPO optimization round within the given time budget.

        Updates ``self.best_score`` / ``self.best_candidate`` if SMAC finds a
        better configuration, and returns the overall best as a
        ``(config, score)`` tuple.
        """
        def _evaluate_config(config):
            # SMAC minimizes, so return 1 - score to maximize the score.
            vector = self._smac_dict_to_vector(config.get_dictionary())
            score = self._score_candidate(vector)
            return 1 - score

        if not self._is_stop_event_set():
            smac = SMAC4HPO(
                scenario=self._create_scenario(optimization_time_budget),
                tae_runner=_evaluate_config,
                rng=self.numpy_random_state,
                runhistory=self._create_run_history(),
            )
            # Read the incumbent off the solver in `finally` so a usable
            # candidate exists even when optimize() is interrupted.
            # NOTE(review): if optimize() raises, the exception still
            # propagates out of this method after the finally block runs.
            try:
                candidate = smac.optimize()
            finally:
                candidate = smac.solver.incumbent
            # Re-score the incumbent and keep it only if it improves on the
            # best seen across previous calls.
            vector = self._smac_dict_to_vector(candidate.get_dictionary())
            score = self._score_candidate(vector)
            if score > self.best_score:
                self.best_score = score
                self.best_candidate = self._smac_dict_to_vector(
                    candidate.get_dictionary())
        return (
            self.parameter_domain.config_from_vector(self.best_candidate),
            self.best_score,
        )
Ejemplo n.º 3
0
 def test_exchange_sobol_for_lhd(self):
     """Sobol is the default initial design up to 40 dimensions; above that
     the SMAC4HPO facade must reject it with a helpful error message."""
     cs = ConfigurationSpace()
     # 40 dimensions: the documented maximum for the Sobol sequence.
     for i in range(40):
         cs.add_hyperparameter(
             UniformFloatHyperparameter('x%d' % (i + 1), 0, 1))
     scenario = Scenario({'cs': cs, 'run_obj': 'quality'})
     facade = SMAC4HPO(scenario=scenario)
     self.assertIsInstance(facade.solver.initial_design, SobolDesign)
     # A 41st dimension pushes past the limit and should raise on construction.
     cs.add_hyperparameter(UniformFloatHyperparameter('x41', 0, 1))
     with self.assertRaisesRegex(
             ValueError,
             'Sobol sequence" can only handle up to 40 dimensions. Please use a different initial design, such as '
             '"the Latin Hypercube design"',
     ):
         SMAC4HPO(scenario=scenario)
     self.output_dirs.append(scenario.output_dir)
Ejemplo n.º 4
0
    def optimize(self):
        """Tune the 'optimizer' choice with SMAC, then train and evaluate a
        model with the best hyperparameters, storing results in ``self.result``.
        """
        cs = ConfigurationSpace()

        if 'optimizer' in self.params:

            optimizer = CategoricalHyperparameter('optimizer',
                                                  self.params['optimizer'])

            cs.add_hyperparameter(optimizer)

        scenario = Scenario({
            "run_obj": "quality",
            "runcount-limit": 5,
            "cutoff-time": 10,
            "cs": cs,
            "deterministic": "true"
        })

        print(cs.get_default_configuration())

        # NOTE(review): def_value is never used afterwards; presumably kept
        # for its side effect of evaluating the default config -- confirm.
        def_value = self.svm_from_config(cs.get_default_configuration())

        smac = SMAC4HPO(scenario=scenario,
                        rng=np.random.RandomState(42),
                        tae_runner=self.svm_from_config)

        incumbent = smac.optimize()

        __params = incumbent.get_dictionary()

        # Re-evaluate the incumbent once (inc_value is also unused below).
        inc_value = self.svm_from_config(incumbent)

        print(__params)

        # Retrain a fresh model with the tuned hyperparameters.
        model = create_model(**__params)

        hist = model.fit(self.x_train, self.y_train, batch_size=128, epochs=6)

        # Grab the wallclock time before releasing the SMAC object.
        time = smac.stats.wallclock_time_used

        loss, accuracy, f1_score, precision, recall = model.evaluate(
            self.x_test, self.y_test, verbose=0)

        del smac

        self.result = {
            'accuracy': accuracy,
            'time': time,
            'best_params': __params
        }
Ejemplo n.º 5
0
def main():
    """Benchmark SMAC3 on each synthetic function over several repetitions
    and dump the mean/std regret curves to SMAC3.json."""
    for synthetic_function_cls in synthetic_functions:
        meta_info = synthetic_function_cls.get_meta_information()
        # Some benchmarks prescribe their own evaluation budget.
        if "num_function_evals" in meta_info:
            max_iter = meta_info["num_function_evals"]
        else:
            max_iter = base_max_iter
        # Build the hyperparameter configuration space from the bounds.
        config_space = ConfigurationSpace()
        config_space.generate_all_continuous_from_bounds(
            synthetic_function_cls.get_meta_information()['bounds'])
        synthetic_function = synthetic_function_cls()

        # Define the objective: regret w.r.t. the known optimum f_opt.
        def evaluation(config: dict):
            config = Configuration(config_space, values=config)
            return synthetic_function.objective_function(config)["function_value"] - \
                   synthetic_function.get_meta_information()["f_opt"]

        res = pd.DataFrame(columns=[f"trial-{i}" for i in range(repetitions)],
                           index=range(max_iter))
        print(meta_info["name"])
        for trial in range(repetitions):
            random_state = base_random_state + 10 * trial
            # Scenario object
            scenario = Scenario({
                "run_obj":
                "quality",  # we optimize quality (alternatively runtime)
                "runcount-limit": max_iter,
                # max. number of function evaluations
                "cs": config_space,  # configuration space
                "deterministic": "true"
            })
            smac = SMAC4HPO(scenario=scenario,
                            rng=np.random.RandomState(random_state),
                            tae_runner=evaluation,
                            initial_design_kwargs={"init_budget": 20})
            incumbent = smac.optimize()
            runhistory = smac.runhistory
            configs = runhistory.get_all_configs()
            losses = [runhistory.get_cost(config) for config in configs]
            res[f"trial-{trial}"] = losses
            print(min(losses))
        # Convert raw per-iteration losses to a running minimum, then aggregate.
        res = raw2min(res)
        m = res.mean(1)
        s = res.std(1)
        name = synthetic_function.get_meta_information()["name"]
        final_result[name] = {"mean": m.tolist(), "std": s.tolist()}
    Path(f"SMAC3.json").write_text(json.dumps(final_result))
Ejemplo n.º 6
0
def create_or_restore_smac(scenario_dict, rng, tae):
    """Create a fresh SMAC4HPO run, or restore one from a previous run directory.

    A run is considered restorable when ``traj_aclib2.json`` exists in
    ``<output_dir>/run_1``; otherwise any partial output is wiped and a new
    run is started from scratch.

    Args:
        scenario_dict: keyword options for ``Scenario`` (must include
            ``output_dir``).
        rng: random state / seed forwarded to SMAC.
        tae: target algorithm (callable) to optimize.

    Returns:
        A ready-to-use ``SMAC4HPO`` instance.
    """
    out_dir = path.join(scenario_dict['output_dir'], 'run_1')
    # BUG FIX: the condition used to be `if True or not isfile(...)`, which
    # short-circuited and made the restore branch below unreachable dead code.
    if not isfile(path.join(out_dir, "traj_aclib2.json")):
        # if some incomplete data lies around, delete it completely
        shutil.rmtree(out_dir, ignore_errors=True)
        scenario = Scenario(scenario_dict)
        smac = SMAC4HPO(scenario=scenario,
                        rng=rng,
                        tae_runner=tae,
                        initial_design=SobolDesign,
                        run_id=1)
    else:
        new_scenario = Scenario(scenario_dict)
        # Recover the run history, ...
        rh_path = path.join(out_dir, "runhistory.json")
        runhistory = RunHistory(aggregate_func=None)
        runhistory.load_json(rh_path, new_scenario.cs)
        # ... stats, ...
        stats_path = path.join(out_dir, "stats.json")
        stats = Stats(new_scenario)
        stats.load(stats_path)
        # ... and trajectory.
        traj_path = path.join(out_dir, "traj_aclib2.json")
        trajectory = TrajLogger.read_traj_aclib_format(fn=traj_path,
                                                       cs=new_scenario.cs)
        incumbent = trajectory[-1]["incumbent"]

        # Now we can initialize SMAC with the recovered objects and restore the
        # state where we left off. By providing stats and a restore_incumbent, SMAC
        # automatically detects the intention of restoring a state.
        smac = SMAC4HPO(scenario=new_scenario,
                        runhistory=runhistory,
                        stats=stats,
                        restore_incumbent=incumbent,
                        run_id=1)
        print('restored smac from:', out_dir)
    return smac
Ejemplo n.º 7
0
def func(x):
    """Run a shared-model (pSMAC-style) SMAC4HPO optimization backed by HDFS.

    Relies on the module-level ``cs`` (configuration space) and
    ``svm_from_cfg`` (target algorithm).  ``x`` is unused; the signature is
    kept for callers that pass a worker index.

    Returns:
        The incumbent configuration found by SMAC.
    """
    # Renamed from `dir`, which shadowed the builtin of the same name.
    shared_dir = "/user/tqc/test_dir"
    # Scenario object
    scenario = Scenario({"run_obj": "quality",  # we optimize quality (alternatively runtime)
                         "runcount-limit": 50,
                         # max. number of function evaluations; for this example set to a low number
                         "cs": cs,  # configuration space
                         "deterministic": "true",
                         # parallel workers share one model through this dir
                         "shared_model": True,
                         "input_psmac_dirs": shared_dir,
                         'output_dir': shared_dir,
                         }, runtime='spark',
                        spark_config={'hdfs_url': 'http://0.0.0.0:50070'})
    smac = SMAC4HPO(scenario=scenario, rng=np.random.RandomState(42),
                    tae_runner=svm_from_cfg)
    return smac.optimize()
Ejemplo n.º 8
0
def fmin_smac_nopynisher(func, x0, bounds, maxfun, rng):
    """
    Minimize a function using SMAC, but without pynisher, which doesn't work
    well with benchmark_minimize_callable.

    This function is based on SMAC's fmin_smac.

    Args:
        func: callable taking a 1-D numpy array, returning a float cost.
        x0: default value per dimension (used as the initial incumbent).
        bounds: iterable of (lower, upper) pairs, one per dimension.
        maxfun: maximum number of function evaluations.
        rng: random state forwarded to SMAC.
    """
    cs = ConfigurationSpace()
    # Zero-pad parameter names (x01, x02, ...) so sorting them by name
    # reproduces the original dimension order in call_ta below.
    tmplt = 'x{0:0' + str(len(str(len(bounds)))) + 'd}'
    for idx, (lower_bound, upper_bound) in enumerate(bounds):
        parameter = UniformFloatHyperparameter(
            name=tmplt.format(idx + 1),
            lower=lower_bound,
            upper=upper_bound,
            default_value=x0[idx],
        )
        cs.add_hyperparameter(parameter)

    scenario_dict = {
        "run_obj": "quality",
        "cs": cs,
        "deterministic": "true",
        "initial_incumbent": "DEFAULT",
        "runcount_limit": maxfun,
    }
    scenario = Scenario(scenario_dict)

    def call_ta(config):
        # BUG FIX: `np.float` was a deprecated alias (removed in NumPy 1.24);
        # the builtin `float` is the documented replacement.
        x = np.array(
            [val for _, val in sorted(config.get_dictionary().items())],
            dtype=float)
        return func(x)

    smac = SMAC4HPO(
        scenario=scenario,
        tae_runner=ExecuteTAFuncArray,
        tae_runner_kwargs={
            'ta': call_ta,
            'use_pynisher': False  # pynisher disabled on purpose (see above)
        },
        rng=rng,
        initial_design=RandomConfigurations,
    )

    smac.optimize()
    return
Ejemplo n.º 9
0
def best_hyperparams_smac():
    """Tune the SVD hyperparameters with SMAC4HPO and return the incumbent.

    Relies on the module-level ``SVD_SMAC_SPACE`` search-space definition and
    the ``_hyperopt`` target function.

    Returns:
        The best configuration found by SMAC.
    """
    # (removed an unused `iteration = 1` local)
    cs = ConfigurationSpace()
    cs.add_hyperparameters(SVD_SMAC_SPACE.values())
    scenario = Scenario({
        "run_obj": "quality",  # we optimize quality (alternatively runtime)
        "runcount-limit":
        100,  # max. number of function evaluations
        "cs": cs,  # configuration space
        "deterministic": "true"
    })
    smac = SMAC4HPO(
        scenario=scenario,
        rng=np.random.RandomState(42),
        tae_runner=_hyperopt,
    )
    # Return the incumbent instead of discarding it; callers that previously
    # ignored the (None) return value are unaffected.
    return smac.optimize()
Ejemplo n.º 10
0
def main(benchmark_name, dataset_name, dimensions, method_name, num_runs,
         run_start, num_iterations, input_dir, output_dir):
    """Run SMAC4HPO on one benchmark for run ids [run_start, num_runs) and
    write one CSV of logged evaluations per run under
    <output_dir>/<name>/<method_name>/."""

    benchmark = make_benchmark(benchmark_name,
                               dimensions=dimensions,
                               dataset_name=dataset_name,
                               input_dir=input_dir)
    name = make_name(benchmark_name,
                     dimensions=dimensions,
                     dataset_name=dataset_name)

    output_path = Path(output_dir).joinpath(name, method_name)
    output_path.mkdir(parents=True, exist_ok=True)

    # Record the (currently empty) option set next to the results for
    # reproducibility.
    options = dict()
    with output_path.joinpath("options.yaml").open('w') as f:
        yaml.dump(options, f)

    def objective(config, seed):
        return benchmark.evaluate(config).value

    for run_id in range(run_start, num_runs):

        # Seed the RNG with the run id so each run is independently reproducible.
        random_state = np.random.RandomState(run_id)
        scenario = Scenario({
            "run_obj": "quality",
            "runcount-limit": num_iterations,
            "cs": benchmark.get_config_space(),
            "deterministic": "true",
            # NOTE(review): hard-coded scratch directory -- intentional?
            "output_dir": "foo/"
        })
        run_history = RunHistory()

        smac = SMAC4HPO(scenario=scenario,
                        tae_runner=objective,
                        runhistory=run_history,
                        rng=random_state)
        smac.optimize()

        data = SMACLogs(run_history).to_frame()
        data.to_csv(output_path.joinpath(f"{run_id:03d}.csv"))

    return 0
Ejemplo n.º 11
0
def optimize_smac(problem, max_evals, rand_evals=1, deterministic=False, log=None):
    """Run SMAC on ``problem`` and return (best_x, best_fitness, monitor).

    Uses SMAC4HPO for up to 40 variables and SMAC4AC above that (the HPO
    facade's default Sobol initial design supports at most 40 dimensions).
    """
    n = len(problem.vartype())
    # Zero-pad variable names (v00, v01, ...) so they sort in index order.
    # NOTE(review): assumes get_variables() uses this exact naming scheme --
    # verify against its definition.
    nlog10 = math.ceil(math.log10(n))
    
    mon = Monitor(f"smac{'/det' if deterministic else ''}{'/ac' if n > 40 else ''}", problem, log=log)
    def f(cfg):
        # Rebuild the decision vector from the named configuration entries.
        xvec = np.array([cfg.get(f'v{varidx:0{nlog10}}') for varidx, t in enumerate(problem.vartype())])
        mon.commit_start_eval()
        r = float(problem.evaluate(xvec))
        mon.commit_end_eval(xvec, r)
        return r

    cs = get_variables(problem)

    sc = Scenario({
        "run_obj": "quality",
        "runcount-limit": max_evals,
        "cs": cs,
        "output_dir": None,
        "limit_resources": False, # Limiting resources stops the Monitor from working...
        "deterministic": deterministic
    })
    # smac = SMAC4HPO(scenario=sc, tae_runner=f)
    if n <= 40:
        smac = SMAC4HPO(scenario=sc, initial_design=RandomConfigurations, initial_design_kwargs={'init_budget': rand_evals}, tae_runner=f)
    else:
        smac = SMAC4AC(scenario=sc, initial_design=RandomConfigurations, initial_design_kwargs={'init_budget': rand_evals}, tae_runner=f)

    mon.start()
    result = smac.optimize()
    mon.end()

    # print(f"Best trial: {best_trial}")

    # NOTE(review): relies on the incumbent Configuration iterating its keys
    # in variable order -- confirm for the ConfigSpace version in use.
    solX = [result[k] for k in result] 
    # print(f"Best point: {solX}")
    # Note, this runs the function again, just to compute the fitness again.
    # solY = f(solX)
    # We can also ask it from our evaluation monitor.
    solY = mon.best_fitness

    return solX, solY, mon
Ejemplo n.º 12
0
def smac_opt():
    """Tune the 5 strategy parameters with SMAC4HPO.

    Builds a continuous search space of ``n_params`` dimensions, then lets
    SMAC minimize the negated score returned by the Java ``RunStrategy``
    backend (accessed through pyjnius).  Runs until externally stopped.
    """
    # Import ConfigSpace and different types of parameters
    from smac.configspace import ConfigurationSpace
    from ConfigSpace.hyperparameters import UniformFloatHyperparameter
    # Import SMAC-utilities
    from smac.scenario.scenario import Scenario
    from smac.facade.smac_hpo_facade import SMAC4HPO

    n_params = 5

    def fun_to_optimize(x):
        # jnius bridges into the JVM; RunStrategy is a Java class.
        from jnius import autoclass
        RunStrategy = autoclass('RunStrategy')

        params = [x[f'x{i}'] for i in range(n_params)]
        print(f'params:{params}')
        # SMAC minimizes, so negate the strategy score to maximize it.
        ret = -RunStrategy.runStrategyWithConfiguration(params, 20)
        print(ret)
        return ret

    cs = ConfigurationSpace()
    hyper_params = [
        UniformFloatHyperparameter(f"x{i}", 0, 100, default_value=1)
        for i in range(n_params)
    ]
    cs.add_hyperparameters(hyper_params)

    # Scenario object
    scenario = Scenario({
        "run_obj": "quality",  # we optimize quality (alternatively runtime)
        "runcount-limit":
        999999,  # effectively unbounded; stop the process to end tuning
        "cs": cs,  # configuration space
        "deterministic": "false"
    })

    # Optimize, using a SMAC-object
    smac = SMAC4HPO(scenario=scenario,
                    rng=np.random.RandomState(42),
                    tae_runner=fun_to_optimize)

    smac.optimize()
Ejemplo n.º 13
0
    def __init__(self, scenario: typing.Union[ScenarioWithSavepoint, Scenario, ScenarioProperties], seed=1):
        """Set up a (possibly resumed) SMAC4HPO run for the given scenario.

        If the scenario's run directory already exists, the run history,
        stats and incumbent are restored from it so that optimization
        continues where it left off.
        """
        self.scenario = scenario
        self.scenario.output_dir_for_this_run = os.path.join(self.scenario.output_dir, 'run_1')

        # Create defaults (used when there is no previous state to restore)
        rh = None
        stats = None
        incumbent = None

        if os.path.exists(self.scenario.output_dir_for_this_run):
            rh, stats, incumbent = restore_state(self.scenario)

        # Passing stats + restore_incumbent makes SMAC resume rather than
        # start fresh; this object itself acts as the target algorithm.
        self.smac = SMAC4HPO(scenario=self.scenario,
                             rng=np.random.RandomState(seed),
                             runhistory=rh,
                             initial_design_kwargs=dict(n_configs_x_params=1),
                             stats=stats,
                             restore_incumbent=incumbent,
                             run_id=1,
                             smbo_class=IncrementalSMBO,
                             tae_runner=self)
Ejemplo n.º 14
0
def optim_smac(args):
    """Optimize the snake configuration with SMAC4HPO (pSMAC-shared when
    args.n_jobs > 1), printing default and optimized win percentages."""
    tae = snake_from_config_wrapper("SMAC", args.num_opponents,
                                    args.num_games_per_eval, args.timeout)

    # NOTE(review): the wrapper above already received these settings, yet
    # they are passed again here, while the post-optimization call below
    # passes only the configuration -- confirm the intended tae signature.
    def_value = tae(cs.get_default_configuration(),
                    0,
                    num_opponents=args.num_opponents,
                    num_games_per_eval=args.num_games_per_eval,
                    timeout=args.timeout)
    print(
        f"Default Configuration evaluates to a win percentage of {(1 - def_value) * 100:.2f}%"
    )
    print(
        f"Starting Opimization with walltime of {args.walltime/3600:.2f} hours..."
    )

    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": args.runcount_limit,
        "cs": cs,
        "deterministic": True,
        "wallclock_limit": args.walltime,
        "output_dir": args.output_dir,
        # shared_model + input_psmac_dirs enable pSMAC with multiple jobs
        "shared_model": args.n_jobs > 1,
        "input_psmac_dirs": args.output_dir,
    })

    smac = SMAC4HPO(scenario=scenario,
                    rng=np.random.RandomState(RANDOMSTATES[0]),
                    tae_runner=tae)

    # Read the incumbent off the solver in `finally` so a usable result
    # exists even when optimize() is interrupted.
    try:
        incumbent = smac.optimize()
    finally:
        incumbent = smac.solver.incumbent

    inc_value = tae(incumbent)
    print(
        f"Optimized Configuration {incumbent} evaluates to a win percentage of {(1 - inc_value) * 100:.2f}%"
    )
Ejemplo n.º 15
0
    def __init__(self,
                 configspace: ConfigurationSpace,
                 working_directory: str = '.'):
        """Start a background SMAC run over ``configspace``.

        A randomly named sub-directory under ``working_directory`` receives
        SMAC's output, and the 'smac' logger is quieted to WARNING.
        """
        super().__init__(configspace)

        run_subdir = f'smac/{random.randint(0, 10000000):d}/'
        self.working_directory = os.path.join(working_directory, run_subdir)

        # getLogger returns the same singleton per name, so setting the level
        # on this handle quiets all SMAC-internal logging.
        quiet_logger = logging.getLogger('smac')
        quiet_logger.setLevel(logging.WARNING)

        scenario_options = {
            'run_obj': 'quality',
            'deterministic': True,
            'shared-model': False,
            'cs': self.configspace,
            'output_dir': self.working_directory,
        }
        scenario = Scenario(scenario_options)
        scenario.logger = quiet_logger

        facade = SMAC4HPO(scenario=scenario, smbo_class=SplitSMBO)
        self.smbo: SplitSMBO = facade.solver
        self.smbo.logger = quiet_logger
        self.smbo.start()
Ejemplo n.º 16
0
def quant_post_hpo(
        executor,
        place,
        model_dir,
        quantize_model_path,
        train_sample_generator=None,
        eval_sample_generator=None,
        train_dataloader=None,
        eval_dataloader=None,
        eval_function=None,
        model_filename=None,
        params_filename=None,
        save_model_filename='__model__',
        save_params_filename='__params__',
        scope=None,
        quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
        is_full_quantize=False,
        weight_bits=8,
        activation_bits=8,
        weight_quantize_type=['channel_wise_abs_max'],
        algo=["KL", "hist", "avg", "mse"],
        bias_correct=[True, False],
        hist_percent=[0.98, 0.999],  ### uniform sample in list.
        batch_size=[10, 30],  ### uniform sample in list.
        batch_num=[10, 30],  ### uniform sample in list.
        optimize_model=False,
        is_use_cache_file=False,
        cache_dir="./temp_post_training",
        runcount_limit=30):
    """
    The function utilizes static post training quantization method to
    quantize the fp32 model. It uses calibrate data to calculate the
    scale factor of quantized variables, and inserts fake quantization
    and dequantization operators to obtain the quantized model.

    Args:
        executor(paddle.static.Executor): The executor to load, run and save the
            quantized model.
        place(paddle.CPUPlace or paddle.CUDAPlace): This parameter represents
            the executor run on which device.
        model_dir(str): The path of fp32 model that will be quantized, and
            the model and params that saved by ``paddle.static.io.save_inference_model``
            are under the path.
        quantize_model_path(str): The path to save quantized model using api
            ``paddle.static.io.save_inference_model``.
        train_sample_generator(Python Generator): The sample generator provides
            calibrate data for DataLoader, and it only returns a sample every time.
        eval_sample_generator(Python Generator): The sample generator provides
            evalution data for DataLoader, and it only returns a sample every time.
        model_filename(str, optional): The name of model file. If parameters
            are saved in separate files, set it as 'None'. Default: 'None'.
        params_filename(str, optional): The name of params file.
                When all parameters are saved in a single file, set it
                as filename. If parameters are saved in separate files,
                set it as 'None'. Default : 'None'.
        save_model_filename(str): The name of model file to save the quantized inference program.  Default: '__model__'.
        save_params_filename(str): The name of file to save all related parameters.
                If it is set None, parameters will be saved in separate files. Default: '__params__'.
        scope(paddle.static.Scope, optional): The scope to run program, use it to load
                        and save variables. If scope is None, will use paddle.static.global_scope().
        quantizable_op_type(list[str], optional): The list of op types
                        that will be quantized. Default: ["conv2d", "depthwise_conv2d",
                        "mul"].
        is_full_quantize(bool): if True, apply quantization to all supported quantizable op type.
                        If False, only apply quantization to the input quantizable_op_type. Default is False.
        weight_bits(int, optional): quantization bit number for weights.
        activation_bits(int): quantization bit number for activation.
        weight_quantize_type(str): quantization type for weights,
                support 'abs_max' and 'channel_wise_abs_max'. Compared to 'abs_max',
                the model accuracy is usually higher when using 'channel_wise_abs_max'.
        optimize_model(bool, optional): If set optimize_model as True, it applies some
                passes to optimize the model before quantization. So far, the place of
                executor must be cpu it supports fusing batch_norm into convs.
        is_use_cache_file(bool): This param is deprecated.
        cache_dir(str): This param is deprecated.
        runcount_limit(int): max. number of model quantization.
    Returns:
        None
    """

    # Collect every user-supplied knob into a module-global config object so
    # the SMAC target function (`quantize`) can reach it.
    global g_quant_config
    g_quant_config = QuantConfig(
        executor, place, model_dir, quantize_model_path, algo, hist_percent,
        bias_correct, batch_size, batch_num, train_sample_generator,
        eval_sample_generator, train_dataloader, eval_dataloader,
        eval_function, model_filename, params_filename, save_model_filename,
        save_params_filename, scope, quantizable_op_type, is_full_quantize,
        weight_bits, activation_bits, weight_quantize_type, optimize_model,
        is_use_cache_file, cache_dir)
    cs = ConfigurationSpace()

    # Each list-valued argument with more than one entry becomes a tunable
    # hyperparameter below; single-entry lists collapse to a fixed value.
    # (The parameter names are rebound from list to hyperparameter/scalar.)
    hyper_params = []

    if 'hist' in algo:
        hist_percent = UniformFloatHyperparameter(
            "hist_percent",
            hist_percent[0],
            hist_percent[1],
            default_value=hist_percent[0])
        hyper_params.append(hist_percent)

    if len(algo) > 1:
        algo = CategoricalHyperparameter("algo", algo, default_value=algo[0])
        hyper_params.append(algo)
    else:
        algo = algo[0]

    if len(bias_correct) > 1:
        bias_correct = CategoricalHyperparameter("bias_correct",
                                                 bias_correct,
                                                 default_value=bias_correct[0])
        hyper_params.append(bias_correct)
    else:
        bias_correct = bias_correct[0]
    if len(weight_quantize_type) > 1:
        weight_quantize_type = CategoricalHyperparameter("weight_quantize_type", \
            weight_quantize_type, default_value=weight_quantize_type[0])
        hyper_params.append(weight_quantize_type)
    else:
        weight_quantize_type = weight_quantize_type[0]
    if len(batch_size) > 1:
        batch_size = UniformIntegerHyperparameter("batch_size",
                                                  batch_size[0],
                                                  batch_size[1],
                                                  default_value=batch_size[0])
        hyper_params.append(batch_size)
    else:
        batch_size = batch_size[0]

    if len(batch_num) > 1:
        batch_num = UniformIntegerHyperparameter("batch_num",
                                                 batch_num[0],
                                                 batch_num[1],
                                                 default_value=batch_num[0])
        hyper_params.append(batch_num)
    else:
        batch_num = batch_num[0]

    # Nothing to tune -> run plain post-training quantization once and return.
    # NOTE(review): `g_quant_model_cache_path` is presumably a module-level
    # path, and `hist_percent` may still be a hyperparameter object here when
    # 'hist' in algo -- confirm both against the surrounding module.
    if len(hyper_params) == 0:
        quant_post( \
            executor=g_quant_config.executor, \
            scope=g_quant_config.scope, \
            model_dir=g_quant_config.float_infer_model_path, \
            quantize_model_path=g_quant_model_cache_path, \
            sample_generator=g_quant_config.train_sample_generator, \
            data_loader=g_quant_config.train_dataloader,
            model_filename=g_quant_config.model_filename, \
            params_filename=g_quant_config.params_filename, \
            save_model_filename=g_quant_config.save_model_filename, \
            save_params_filename=g_quant_config.save_params_filename, \
            quantizable_op_type=g_quant_config.quantizable_op_type, \
            activation_quantize_type="moving_average_abs_max", \
            weight_quantize_type=weight_quantize_type, \
            algo=algo, \
            hist_percent=hist_percent, \
            bias_correction=bias_correct, \
            batch_size=batch_size, \
            batch_nums=batch_num)

        return

    cs.add_hyperparameters(hyper_params)

    scenario = Scenario({
        "run_obj": "quality",  # we optimize quality (alternative runtime)
        "runcount-limit":
        runcount_limit,  # max. number of function evaluations
        "cs": cs,  # configuration space
        "deterministic": "True",
        "limit_resources": "False",
        "memory_limit":
        4096  # adapt this to reasonable value for your hardware
    })

    # To optimize, we pass the function to the SMAC-object
    smac = SMAC4HPO(scenario=scenario,
                    rng=np.random.RandomState(42),
                    tae_runner=quantize)

    # Example call of the function with default values
    # It returns: Status, Cost, Runtime, Additional Infos
    def_value = smac.get_tae_runner().run(cs.get_default_configuration(), 1)[1]
    print("Value for default configuration: %.8f" % def_value)

    # Start optimization; read the incumbent in `finally` so a usable result
    # exists even when optimize() is interrupted.
    try:
        incumbent = smac.optimize()
    finally:
        incumbent = smac.solver.incumbent

    inc_value = smac.get_tae_runner().run(incumbent, 1)[1]
    print("Optimized Value: %.8f" % inc_value)
    print("quantize completed")
Ejemplo n.º 17
0
def run_experiment(out_path: str, on_travis: bool = False):
    """Tune the containerized cartpole benchmark with SMAC4HPO + Successive Halving.

    Args:
        out_path: directory for SMAC output (created if missing).
        on_travis: shrink limits so the run fits into a CI job.
    """
    out_path = Path(out_path)
    out_path.mkdir(exist_ok=True)

    benchmark = Benchmark(container_source='library://phmueller/automl',
                          container_name='cartpole',
                          rng=1)

    cs = benchmark.get_configuration_space(seed=1)

    scenario_dict = {
        "run_obj": "quality",  # we optimize quality (alternative to runtime)
        "wallclock-limit":
        5 * 60 * 60,  # max duration to run the optimization (in seconds)
        "cs": cs,  # configuration space
        "deterministic": "true",
        "runcount-limit": 200,
        "limit_resources": True,  # Uses pynisher to limit memory and runtime
        "cutoff": 1800,  # runtime limit for target algorithm
        "memory_limit":
        10000,  # adapt this to reasonable value for your hardware
        "output_dir": str(out_path),
    }

    if on_travis:
        scenario_dict.update(get_travis_settings('smac'))

    scenario = Scenario(scenario_dict)

    # Number of Agents, which are trained to solve the cartpole experiment
    max_budget = 5 if not on_travis else 2

    def optimization_function_wrapper(cfg, seed, instance, budget):
        """ Helper-function: simple wrapper to use the benchmark with smac """
        b = Benchmark(container_source='library://phmueller/automl',
                      container_name='cartpole',
                      rng=seed)

        # Old API ---- NO LONGER SUPPORTED ---- This will simply ignore the fidelities
        # result_dict = b.objective_function(cfg, budget=int(budget))

        # New API ---- Use this
        result_dict = b.objective_function(cfg,
                                           fidelity={"budget": int(budget)})
        return result_dict['function_value']

    smac = SMAC4HPO(scenario=scenario,
                    rng=np.random.RandomState(42),
                    tae_runner=optimization_function_wrapper,
                    intensifier=SuccessiveHalving,
                    intensifier_kwargs={
                        'initial_budget': 1,
                        'max_budget': max_budget,
                        'eta': 3
                    })

    start_time = time()
    # Example call of the function with default values. It returns: Status, Cost, Runtime, Additional Infos
    def_value = smac.get_tae_runner().run(
        config=cs.get_default_configuration(), instance='1', budget=1,
        seed=0)[1]
    print(
        f"Value for default configuration: {def_value:.4f}.\nEvaluation took {time() - start_time:.0f}s"
    )

    # Start optimization
    start_time = time()
    try:
        smac.optimize()
    finally:
        # Read the incumbent off the solver so it survives interruption.
        incumbent = smac.solver.incumbent
    end_time = time()

    if not on_travis:
        inc_value = smac.get_tae_runner().run(config=incumbent,
                                              instance='1',
                                              budget=max_budget,
                                              seed=0)[1]
        print(f"Value for optimized configuration: {inc_value:.4f}.\n"
              f"Optimization took {end_time-start_time:.0f}s")
Ejemplo n.º 18
0
def smac_validation(
    model_function,
    term_library,
    X_train,
    U_train,
    X_eval,
    U_eval,
    n_iter,
    use_regularization,
):
    """Select terms from *term_library* (and optionally a regularization
    strength) with SMAC by minimizing the RMSE on the evaluation split.

    Parameters
    ----------
    model_function : callable
        Factory ``model_function(terms, regularization)`` returning a model
        exposing ``train_BFGS``, ``predict`` and ``cleanup``.
    term_library : sequence
        Candidate terms; each gets a binary 0/1 hyperparameter named by its
        index.
    X_train, U_train
        Training inputs and targets passed straight to ``train_BFGS``.
    X_eval, U_eval
        Held-out inputs/targets used to score a configuration via ``rmse``.
    n_iter : int
        SMAC ``runcount-limit`` (number of target-function evaluations).
    use_regularization : bool
        If True, additionally tune a log-uniform ``"reg"`` hyperparameter in
        [1e-5, 100]; otherwise a fixed regularization of 1.0 is used.

    Returns
    -------
    (smac, incumbent)
        The SMAC optimizer object and the best configuration found.
    """

    def evaluation_function(params: dict, instance, budget, **kwargs):
        # A term is active when its binary hyperparameter is 1; the > 0.5
        # test is robust should SMAC hand back float-valued params.
        # (The original indexed ``term_library[int(idx)]`` — idx is already
        # an int, so the cast was redundant.)
        selected = [term for idx, term in enumerate(term_library)
                    if params[str(idx)] > 0.5]

        regularization = params["reg"] if use_regularization else 1.0

        model = model_function(selected, regularization)
        model.train_BFGS(X_train, U_train)
        U_hat = model.predict(X_eval)

        error = rmse(U_eval, U_hat)

        model.cleanup()

        # SMAC minimizes the returned cost. (A single model is trained per
        # configuration, so the former one-element ``errors`` list folded
        # through ``np.min`` reduced to exactly this value.)
        return error

    # One binary on/off hyperparameter per library term, keyed by index.
    cs = ConfigurationSpace()
    cs.add_hyperparameters([
        UniformIntegerHyperparameter(str(i), 0, 1)
        for i in range(len(term_library))
    ])

    if use_regularization:
        cs.add_hyperparameter(
            UniformFloatHyperparameter("reg", 1e-5, 100, log=True))

    scenario = Scenario({
        "run_obj": "quality",
        "cs": cs,
        "runcount-limit": n_iter,
        "limit_resources": False,
        "deterministic": True,
    })

    smac = SMAC4HPO(scenario=scenario, tae_runner=evaluation_function)

    incumbent = smac.optimize()

    return smac, incumbent
Ejemplo n.º 19
0
        # You can define individual crash costs for each objective
        "cost_for_crash": [1, float(MAXINT)],
    })

    # Example call of the function
    # It returns: Status, Cost, Runtime, Additional Infos
    def_value = svm_from_cfg(cs.get_default_configuration())
    print("Default config's cost: {cost:2f}, training time: {time:2f} seconds".
          format(**def_value))

    # Optimize, using a SMAC-object
    print(
        "Optimizing! Depending on your machine, this might take a few minutes."
    )
    # Pass the multi objective algorithm and its hyperparameters
    smac = SMAC4HPO(
        scenario=scenario,
        rng=np.random.RandomState(42),
        tae_runner=svm_from_cfg,
        multi_objective_algorithm=ParEGO,
        multi_objective_kwargs={
            "rho": 0.05,
        },
    )

    incumbent = smac.optimize()

    # pareto front based on smac.runhistory.data
    cost = np.vstack([v[0] for v in smac.runhistory.data.values()])
    plot_pareto_from_runhistory(cost)
Ejemplo n.º 20
0
            print('#' * 80)
            print('#' * 80)

            # SMAC scenario object
            scenario = Scenario({
                "run_obj":
                "quality",
                "runcount-limit":
                25,
                "cs":
                cs,
                "deterministic":
                "true",
                "output-dir":
                "/home/feurerm/projects/smac3parallel/new/%s" % str(n_workers),
            })

            # To optimize, we pass the function to the SMAC-object.
            # The sleeping branin variant is presumably used so that parallel
            # workers actually overlap in time — confirm in the branin module.
            smac = SMAC4HPO(scenario=scenario,
                            rng=np.random.RandomState(seed),
                            tae_runner=branin.branin
                            if n_workers == 1 else branin.branin_sleep,
                            n_jobs=n_workers,
                            intensifier=SimpleIntensifier)
            incumbent = smac.optimize()
            del smac

            # Give background workers/output a moment to settle before the
            # next configuration of the experiment — TODO confirm necessity.
            time.sleep(1)
            sys.stdout.flush()
            sys.stderr.flush()
Ejemplo n.º 21
0
    #                     "initial_incumbent": "DEFAULT",
})

i = int(sys.argv[1])
# Optimize, using a SMAC-object
print("Optimizing! Depending on your machine, this might take a few minutes.")
if i == 1:
    initial_configuration = [cs.get_default_configuration()]
else:
    initial_configuration = None

tae_runner = partial(gadma_from_cfg, i)
smac = SMAC4HPO(scenario=scenario,
                rng=np.random.RandomState(i),
                tae_runner=tae_runner,
                run_id=i,
                intensifier_kwargs=intensifier_kwargs,
                initial_design=None,
                initial_configurations=initial_configuration)

incumbent = smac.optimize()

def_costs = []
for i in INSTANCES:
    cost = smac.get_tae_runner().run(cs.get_default_configuration(), i[0])[1]
    def_costs.append(cost)
print("Value for default configuration: %.4f" % (np.mean(def_costs)))

inc_costs = []
for i in INSTANCES:
    cost = smac.get_tae_runner().run(incumbent, i[0])[1]
Ejemplo n.º 22
0
                                 tae_runner=tat,
                                 pca_components=2)
                hpo_result, info = run_smac_based_optimizer(hpo, tae)

                write_output(
                    f"[{name}] time={info['time']} train_loss={info['last_train_loss']} "
                    f"test_loss={info['last_test_loss']}\n")

                records = util.add_record(records, task_id, name, hpo_result)

                ########################################################################################################
                # SMAC
                ########################################################################################################
                name = "smac"
                print(f"\n[{name}] ")
                # Plain SMAC4HPO baseline (Bayesian-optimization facade).
                hpo = SMAC4HPO(scenario=scenario, rng=rng, tae_runner=tat)
                hpo_result, info = run_smac_based_optimizer(hpo, tae)

                write_output(
                    f"[{name}] time={info['time']} train_loss={info['last_train_loss']} "
                    f"test_loss={info['last_test_loss']}\n")

                records = util.add_record(records, task_id, name, hpo_result)

                ########################################################################################################
                # ROAR x2
                ########################################################################################################
                name = "roar_x2"
                print(f"\n[{name}] ")
                # ROAR = Random Online Aggressive Racing (random search with
                # SMAC's intensification). ``speed=2`` presumably scales the
                # evaluation budget — confirm in run_smac_based_optimizer.
                hpo = ROAR(scenario=scenario, rng=rng, tae_runner=tat)
                hpo_result, info = run_smac_based_optimizer(hpo, tae, speed=2)
Ejemplo n.º 23
0
    values, configs = readData(cs, "hydrogen_new_3.csv")

    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": len(values) + 1,
        "cs": cs,
        "deterministic": "true",
        "limit-resources": "false"
    })
    for _ in range(100):
        smac = SMAC4HPO(
            scenario=scenario,
            rng=np.random.RandomState(random.randint(0, 1000000)),
            tae_runner=optFunc(values),
            # acquisition_function=EI,
            acquisition_function_kwargs={'par': 0.5},
            # runhistory2epm=RunHistory2EPM4LogCost,
            initial_design=None,
            initial_configurations=list(values.keys()),
        )
        try:
            smac.optimize()
        except Exception:
            pass

    data = []
    for config in results.keys():
        res = dict(config._values)
        res['flow'] = results[config]
        data.append(res)
    data.sort(key=lambda d: -d['flow'])
Ejemplo n.º 24
0
x1 = UniformFloatHyperparameter("x1", -5, 10, default_value=-4)
cs.add_hyperparameters([x0, x1])

# Scenario object
scenario = Scenario({"run_obj": "quality",  # we optimize quality (alternatively runtime)
                     "runcount-limit": 10,  # max. number of function evaluations; for this example set to a low number
                     "cs": cs,  # configuration space
                     "deterministic": "true"
                     })

# Example call of the function
# It returns: Status, Cost, Runtime, Additional Infos
def_value = rosenbrock_2d(cs.get_default_configuration())
print("Default Value: %.2f" % def_value)

# Optimize, using a SMAC-object
for acquisition_func in (LCB, EI, PI):
    print("Optimizing with %s! Depending on your machine, this might take a few minutes." % acquisition_func)
    smac = SMAC4HPO(scenario=scenario, rng=np.random.RandomState(42),
                    tae_runner=rosenbrock_2d,
                    initial_design=LHDesign,
                    initial_design_kwargs={'n_configs_x_params': 4,
                                           'max_config_fracs': 1.0},
                    runhistory2epm=RunHistory2EPM4InvScaledCost,
                    acquisition_function_optimizer_kwargs={'max_steps': 100},
                    acquisition_function=acquisition_func,
                    acquisition_function_kwargs={'par': 0.01}
                    )

    smac.optimize()
Ejemplo n.º 25
0
	os.system(cmd)
	cmd = 'sh ' + '__run 1'
	os.system(cmd)
	# NOTE(review): os.system with interpolated shell strings — acceptable
	# for a local benchmark script, unsafe on untrusted input.
	cmd = 'echo "time ./a.out $cmd" > tmp_bat'
	os.system(cmd)
	# Time 100 executions of the compiled binary via the generated script.
	begin = time.time()
	for i in range(100):
		os.system('source tmp_bat')
	end = time.time()

	# Cost for SMAC: total wall-clock time of the 100 runs (lower is better).
	return end-begin

scenario = Scenario({"run_obj": "quality",   # we optimize quality (alternatively runtime)
                     "runcount-limit": 10,   # max. number of function evaluations; for this example set to a low number
                     "cs": cs,               # configuration space
                     "deterministic": "true"
                     })

smac = SMAC4HPO(scenario=scenario, rng=np.random.RandomState(42),
        tae_runner=run_gcc_time)

incumbent = smac.optimize()
# Re-run the best configuration once to report its timing.
inc_value = run_gcc_time(incumbent)

print ('the best time!')
print (inc_value)
print ('-----------------------------')
print ('-----------------------------')
with open('best_time.txt', 'w') as f:
    f.write(str(inc_value))
Ejemplo n.º 26
0
# Scenario object
scenario = Scenario({
    "run_obj": "quality",  # we optimize quality
    # (alternatively runtime)
    "runcount-limit": 10,
    "cs": cs,  # configuration space
    "deterministic": "false",
    "shared_model": True,  # pSMAC: share model state across parallel runs
    "input_psmac_dirs": "smac-output-maxEnt",
    "cutoff_time": 9000,
    "wallclock_limit": 'inf'  # no wall-clock limit; only runcount bounds the run
})

# Example call of the function
# It returns: Status, Cost, Runtime, Additional Infos
def_value = maxEnt(cs.get_default_configuration())
print("Default Value: %.2f" % def_value)

# Optimize, using a SMAC-object
print("Optimizing! Depending on your machine, this might take a few minutes.")
smac = SMAC4HPO(scenario=scenario,
                rng=np.random.RandomState(42),
                tae_runner=maxEnt)

# Start optimization
try:
    incumbent = smac.optimize()
finally:
    # Always read the incumbent off the solver so an interrupted run still
    # yields the best configuration found so far.
    incumbent = smac.solver.incumbent
Ejemplo n.º 27
0
    def test_ta_integration_to_smbo(self):
        """
        In SMBO. 3 objects need to actively comunicate:
            -> stats
            -> epm
            -> runhistory

        This method makes sure that executed jobs are properly registered
        in the above objects

        It uses n_workers to test parallel and serial implementations!!
        """

        # NOTE(review): range(1, 2) only yields n_workers == 1, so the
        # parallel path is never actually exercised — presumably
        # range(1, 3) was intended; confirm against the docstring's claim.
        for n_workers in range(1, 2):
            # We create a controlled setting, in which we optimize x^2
            # This will allow us to make sure every component act as expected

            # FIRST: config space
            cs = ConfigurationSpace()
            cs.add_hyperparameter(UniformFloatHyperparameter("x", -10.0, 10.0))
            smac = SMAC4HPO(
                scenario=Scenario({
                    "n_workers": n_workers,
                    "cs": cs,
                    "runcount_limit": 5,
                    "run_obj": "quality",
                    "deterministic": True,
                    "limit_resources": True,
                    "initial_incumbent": "DEFAULT",
                    "output_dir": "data-test_smbo",
                }),
                tae_runner=ExecuteTAFuncArray,
                tae_runner_kwargs={"ta": target},
            )

            # Register output dir for deletion
            self.output_dirs.append(smac.output_dir)

            smbo = smac.solver

            # SECOND: Intensifier that tracks configs
            all_configs = []

            # Stand-in for the intensifier's config proposal: sample a fresh
            # random config each call and record it so the assertions below
            # can compare against everything that was launched.
            def mock_get_next_run(**kwargs):
                config = cs.sample_configuration()
                all_configs.append(config)
                return (
                    RunInfoIntent.RUN,
                    RunInfo(
                        config=config,
                        # arbitrary float instance id, varies per call
                        instance=time.time() % 10,
                        instance_specific={},
                        seed=0,
                        cutoff=None,
                        capped=False,
                        budget=0.0,
                    ),
                )

            intensifier = unittest.mock.Mock()
            intensifier.num_run = 0
            intensifier.process_results.return_value = (0.0, 0.0)
            intensifier.get_next_run = mock_get_next_run
            smac.solver.intensifier = intensifier

            # THIRD: Run in this controlled setting
            smbo.run()

            # FOURTH: Checks

            # Make sure all configs where launched
            self.assertEqual(len(all_configs), 5)

            # Run history
            for k, v in smbo.runhistory.data.items():

                # All configuration should be successful
                self.assertEqual(v.status, StatusType.SUCCESS)

                # The value should be the square version of the config
                # The runhistory has  config_ids = {config: int}
                # The k here is {config_id: int}. We search for the actual config
                # by inverse searching this runhistory.config dict
                config = list(smbo.runhistory.config_ids.keys())[list(
                    smbo.runhistory.config_ids.values()).index(k.config_id)]

                self.assertEqual(v.cost, config.get("x")**2)

            # No config is lost in the config history
            self.assertCountEqual(smbo.runhistory.config_ids.keys(),
                                  all_configs)

            # Stats!
            # We do not exceed the number of target algorithm runs
            self.assertEqual(smbo.stats.submitted_ta_runs, len(all_configs))
            self.assertEqual(smbo.stats.finished_ta_runs, len(all_configs))

            # No config is lost
            self.assertEqual(smbo.stats.n_configs, len(all_configs))

            # The EPM can access all points. This is something that
            # also relies on the runhistory
            X, Y, X_config = smbo.epm_chooser._collect_data_to_train_model()
            self.assertEqual(X.shape[0], len(all_configs))
Ejemplo n.º 28
0
    def main_cli(
        self,
        commandline_arguments: typing.Optional[typing.List[str]] = None
    ) -> None:
        """Main function of SMAC for CLI interface.

        Parses command-line arguments, configures root logging, builds the
        Scenario, optionally restores a previous state and/or warm-starts
        from runhistories and trajectories, then dispatches to the facade
        selected via ``--mode`` and runs the optimization.
        """
        self.logger.info("SMAC call: %s" % (" ".join(sys.argv)))

        cmd_reader = CMDReader()
        kwargs = {}
        if commandline_arguments:
            kwargs['commandline_arguments'] = commandline_arguments
        main_args_, smac_args_, scen_args_ = cmd_reader.read_cmd(**kwargs)

        # Configure root logging: terse format at INFO and above, timestamped
        # verbose format below (i.e. when DEBUG output is requested).
        root_logger = logging.getLogger()
        root_logger.setLevel(main_args_.verbose_level)
        logger_handler = logging.StreamHandler(stream=sys.stdout)
        if root_logger.level >= logging.INFO:
            formatter = logging.Formatter("%(levelname)s:\t%(message)s")
        else:
            formatter = logging.Formatter(
                "%(asctime)s:%(levelname)s:%(name)s:\t%(message)s",
                "%Y-%m-%d %H:%M:%S")
        logger_handler.setFormatter(formatter)
        root_logger.addHandler(logger_handler)
        # remove default handler
        if len(root_logger.handlers) > 1:
            root_logger.removeHandler(root_logger.handlers[0])

        # Create defaults
        rh = None
        initial_configs = None
        stats = None
        incumbent = None

        # Create scenario-object from the merged smac + scenario CLI args.
        scenario = {}
        scenario.update(vars(smac_args_))
        scenario.update(vars(scen_args_))
        scen = Scenario(scenario=scenario)

        # Restore state (runhistory, stats, trajectories) from a previous run,
        # then re-create the output directory and recover the incumbent.
        if main_args_.restore_state:
            root_logger.debug("Restoring state from %s...",
                              main_args_.restore_state)
            restore_state = main_args_.restore_state
            rh, stats, traj_list_aclib, traj_list_old = self.restore_state(
                scen, restore_state)

            scen.output_dir_for_this_run = create_output_directory(
                scen,
                main_args_.seed,
                root_logger,
            )
            scen.write()
            incumbent = self.restore_state_after_output_dir(
                scen, stats, traj_list_aclib, traj_list_old)

        # Warm-start: merge runhistories recorded under foreign scenarios.
        if main_args_.warmstart_runhistory:
            rh = RunHistory()

            scen, rh = merge_foreign_data_from_file(
                scenario=scen,
                runhistory=rh,
                in_scenario_fn_list=main_args_.warmstart_scenario,
                in_runhistory_fn_list=main_args_.warmstart_runhistory,
                cs=scen.cs,  # type: ignore[attr-defined] # noqa F821
            )

        # Warm-start the initial design: default config plus the final
        # incumbent of each supplied trajectory file.
        if main_args_.warmstart_incumbent:
            initial_configs = [scen.cs.get_default_configuration()
                               ]  # type: ignore[attr-defined] # noqa F821
            for traj_fn in main_args_.warmstart_incumbent:
                trajectory = TrajLogger.read_traj_aclib_format(
                    fn=traj_fn,
                    cs=scen.cs,  # type: ignore[attr-defined] # noqa F821
                )
                initial_configs.append(trajectory[-1]["incumbent"])

        # Dispatch to the requested facade.
        # NOTE(review): if ``mode`` matched none of these branches,
        # ``optimizer`` below would be unbound — presumably CMDReader
        # restricts the choices; confirm.
        if main_args_.mode == "SMAC4AC":
            optimizer = SMAC4AC(scenario=scen,
                                rng=np.random.RandomState(main_args_.seed),
                                runhistory=rh,
                                initial_configurations=initial_configs,
                                stats=stats,
                                restore_incumbent=incumbent,
                                run_id=main_args_.seed)
        elif main_args_.mode == "SMAC4HPO":
            optimizer = SMAC4HPO(scenario=scen,
                                 rng=np.random.RandomState(main_args_.seed),
                                 runhistory=rh,
                                 initial_configurations=initial_configs,
                                 stats=stats,
                                 restore_incumbent=incumbent,
                                 run_id=main_args_.seed)
        elif main_args_.mode == "SMAC4BB":
            optimizer = SMAC4BB(scenario=scen,
                                rng=np.random.RandomState(main_args_.seed),
                                runhistory=rh,
                                initial_configurations=initial_configs,
                                stats=stats,
                                restore_incumbent=incumbent,
                                run_id=main_args_.seed)
        elif main_args_.mode == "ROAR":
            optimizer = ROAR(scenario=scen,
                             rng=np.random.RandomState(main_args_.seed),
                             runhistory=rh,
                             initial_configurations=initial_configs,
                             run_id=main_args_.seed)
        elif main_args_.mode == "Hydra":
            optimizer = Hydra(
                scenario=scen,
                rng=np.random.RandomState(main_args_.seed),
                runhistory=rh,
                initial_configurations=initial_configs,
                stats=stats,
                restore_incumbent=incumbent,
                run_id=main_args_.seed,
                random_configuration_chooser=main_args_.
                random_configuration_chooser,
                n_iterations=main_args_.hydra_iterations,
                val_set=main_args_.hydra_validation,
                incs_per_round=main_args_.hydra_incumbents_per_round,
                n_optimizers=main_args_.hydra_n_optimizers)
        elif main_args_.mode == "PSMAC":
            # NOTE(review): PSMAC reuses the hydra_* CLI options for its
            # optimizer/incumbent counts — confirm this is intentional.
            optimizer = PSMAC(
                scenario=scen,
                rng=np.random.RandomState(main_args_.seed),
                run_id=main_args_.seed,
                shared_model=smac_args_.shared_model,
                validate=main_args_.psmac_validate,
                n_optimizers=main_args_.hydra_n_optimizers,
                n_incs=main_args_.hydra_incumbents_per_round,
            )
        try:
            optimizer.optimize()
        except (TAEAbortException, FirstRunCrashedException) as err:
            # Unrecoverable target-algorithm failures end the run gracefully.
            self.logger.error(err)
Ejemplo n.º 29
0
    "run_obj": "quality",  # we optimize quality (alternatively runtime)
    "runcount-limit":
    500,  # max. number of function evaluations; for this example set to a low number
    "cs": cs,  # configuration space
    "deterministic": "true"
})

# Example call of the function
# It returns: Status, Cost, Runtime, Additional Infos
def_value = LR_from_cfg(cs.get_default_configuration())
print("Default Value: %.2f" % (def_value))

# Optimize, using a SMAC-object
print("Optimizing! Depending on your machine, this might take a few minutes.")
smac = SMAC4HPO(scenario=scenario,
                rng=np.random.RandomState(42),
                tae_runner=LR_from_cfg)

a_time = time.process_time()
incumbent = smac.optimize()
b_time = time.process_time()

print("+++++++++++++++++++++++")
print("Optimization finished. CPU time consumed: %s" % (a_time - b_time))
print("+++++++++++++++++++++++")

inc_value = LR_from_cfg(incumbent)

print("Optimized Value: %.6f" % (inc_value))

print("before validate time:%s" %
cs.add_hyperparameters([learning_rate, step_size_scalar, l2_regularizer, N])

# Scenario object
scenario = Scenario({"run_obj": "quality",  # we optimize quality
                     # (alternatively runtime)
                     "runcount-limit": 2,
                     "cs": cs,  # configuration space
                     "deterministic": "false",
                     "shared_model": True,  # pSMAC: share model state across parallel runs
                     "input_psmac_dirs": "smac-output-learch",
                     "cutoff_time": 9000,
                     "wallclock_limit": 'inf'
                     })

# Example call of the function
# It returns: Status, Cost, Runtime, Additional Infos
def_value = learch_variant(cs.get_default_configuration())
print("Default Value: %.2f" % def_value)

# Optimize, using a SMAC-object
print("Optimizing! Depending on your machine, this might take a few minutes.")
smac = SMAC4HPO(scenario=scenario,
                rng=np.random.RandomState(42),
                tae_runner=learch_variant)

# Start optimization
try:
    incumbent = smac.optimize()
finally:
    # Recover the incumbent even if optimize() is interrupted.
    incumbent = smac.solver.incumbent