Example #1
 def config_signature(self, config, space: Dict = None) -> tuple:
     """Return the signature tuple of a config."""
     config = flatten_dict(config)
     space = flatten_dict(space) if space else self._space
     value_list = []
     # self._space_keys doesn't contain keys with const values,
     # e.g., "eval_metric": ["logloss", "error"].
     keys = sorted(config.keys()) if self.hierarchical else self._space_keys
     for key in keys:
         value = config[key]
         if key == self.resource_attr:
             value_list.append(value)
         else:
             # key must be in space
             domain = space[key]
             if self.hierarchical and not (
                     domain is None or type(domain) in (str, int, float)
                     or isinstance(domain, sample.Domain)):
                 # not domain or hashable
                 # get rid of list type for hierarchical search space.
                 continue
             if isinstance(domain, sample.Integer):
                 value_list.append(int(round(value)))
             else:
                 value_list.append(value)
     return tuple(value_list)
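
Signature tuples like the one returned above are typically used to de-duplicate proposed configurations. A minimal sketch of that use, assuming a `searcher` object exposing the method above (the `seen` set and `is_new` helper are hypothetical):

# Hypothetical helper: skip configs whose signature was already proposed.
seen = set()

def is_new(searcher, config) -> bool:
    sig = searcher.config_signature(config)  # hashable tuple, as built above
    if sig in seen:
        return False
    seen.add(sig)
    return True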
Example #2
    def create_trial_if_possible(self, experiment_spec: Dict,
                                 output_path: str) -> Optional[Trial]:
        logger.debug("creating trial")
        trial_id = Trial.generate_id()
        suggested_config = self.searcher.suggest(trial_id)
        if suggested_config == Searcher.FINISHED:
            self._finished = True
            logger.debug("Searcher has finished.")
            return

        if suggested_config is None:
            return
        spec = copy.deepcopy(experiment_spec)
        spec["config"] = merge_dicts(spec["config"],
                                     copy.deepcopy(suggested_config))

        # Create a new trial_id if duplicate trial is created
        flattened_config = resolve_nested_dict(spec["config"])
        self._counter += 1
        tag = "{0}_{1}".format(str(self._counter),
                               format_vars(flattened_config))
        trial = create_trial_from_spec(
            spec,
            output_path,
            self._parser,
            evaluated_params=flatten_dict(suggested_config),
            experiment_tag=tag,
            trial_id=trial_id,
        )
        return trial
Example #3
 def test_multi_level_nested(self):
     ori_in = OrderedDict(
         {
             "a": {
                 "b": {
                     "c": {
                         "d": 1,
                     },
                 },
             },
             "b": {
                 "c": {
                     "d": 2,
                 },
             },
             "c": {
                 "d": 3,
             },
             "e": 4,
         }
     )
     in_ = copy.deepcopy(ori_in)
     result = flatten_dict(in_)
     assert in_ == ori_in
     assert result == {"a/b/c/d": 1, "b/c/d": 2, "c/d": 3, "e": 4}
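
The test above pins down the contract of `flatten_dict`: nested keys are joined with "/", the input mapping is left unmodified, and the output preserves the mapping type. A minimal sketch consistent with those tests (not the actual Ray Tune implementation):

def flatten_dict_sketch(dt, delimiter="/"):
    # Recursively flatten nested dicts, joining keys with `delimiter`
    # and preserving the mapping type of the input (e.g. OrderedDict).
    flat = type(dt)()
    for key, value in dt.items():
        if isinstance(value, dict):
            for sub_key, sub_value in flatten_dict_sketch(value, delimiter).items():
                flat[delimiter.join([key, sub_key])] = sub_value
        else:
            flat[key] = value
    return flat

assert flatten_dict_sketch({"a": {"b": {"c": 1}}, "d": 2}) == {"a/b/c": 1, "d": 2}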
Example #4
    def __init__(
        self,
        space: Optional[
            Union[
                Dict[str, "OptunaDistribution"],
                List[Tuple],
                Callable[["OptunaTrial"], Optional[Dict[str, Any]]],
            ]
        ] = None,
        metric: Optional[Union[str, List[str]]] = None,
        mode: Optional[Union[str, List[str]]] = None,
        points_to_evaluate: Optional[List[Dict]] = None,
        sampler: Optional["BaseSampler"] = None,
        seed: Optional[int] = None,
        evaluated_rewards: Optional[List] = None,
    ):
        assert ot is not None, "Optuna must be installed! Run `pip install optuna`."
        super(OptunaSearch, self).__init__(
            metric=metric, mode=mode, max_concurrent=None, use_early_stopped_trials=None
        )

        if isinstance(space, dict) and space:
            resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
            if domain_vars or grid_vars:
                logger.warning(
                    UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self).__name__)
                )
                space = self.convert_search_space(space)
            else:
                # Flatten to support nested dicts
                space = flatten_dict(space, "/")

        self._space = space

        self._points_to_evaluate = points_to_evaluate or []
        self._evaluated_rewards = evaluated_rewards

        self._study_name = "optuna"  # Fixed study name for in-memory storage

        if sampler and seed:
            logger.warning(
                "You passed an initialized sampler to `OptunaSearch`. The "
                "`seed` parameter has to be passed to the sampler directly "
                "and will be ignored."
            )
        elif sampler:
            assert isinstance(sampler, BaseSampler), (
                "You can only pass an instance of "
                "`optuna.samplers.BaseSampler` "
                "as a sampler to `OptunaSearcher`."
            )

        self._sampler = sampler
        self._seed = seed

        self._ot_trials = {}
        self._ot_study = None
        if self._space:
            self._setup_study(mode)
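
A minimal usage sketch for the searcher above, assuming a Ray 1.x-style API (the trainable, metric name, and search space below are made up for illustration):

from ray import tune
from ray.tune.suggest.optuna import OptunaSearch  # ray.tune.search.optuna in newer Ray

def trainable(config):
    # Nested keys arrive as a nested dict; OptunaSearch flattens them internally.
    loss = (config["model"]["lr"] - 0.01) ** 2 + 0.1 * config["model"]["layers"]
    tune.report(loss=loss)

search_space = {"model": {"lr": tune.loguniform(1e-4, 1e-1),
                          "layers": tune.randint(1, 4)}}
tune.run(trainable,
         config=search_space,
         search_alg=OptunaSearch(metric="loss", mode="min"),
         num_samples=10)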
Example #5
    def __init__(self,
                 space: Optional[Union[Dict, List[Tuple]]] = None,
                 metric: Optional[str] = None,
                 mode: Optional[str] = None,
                 points_to_evaluate: Optional[List[Dict]] = None,
                 sampler: Optional[BaseSampler] = None,
                 seed: Optional[int] = None,
                 evaluated_rewards: Optional[List] = None):
        assert ot is not None, (
            "Optuna must be installed! Run `pip install optuna`.")
        super(OptunaSearch, self).__init__(metric=metric,
                                           mode=mode,
                                           max_concurrent=None,
                                           use_early_stopped_trials=None)

        if isinstance(space, dict) and space:
            resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
            if domain_vars or grid_vars:
                logger.warning(
                    UNRESOLVED_SEARCH_SPACE.format(par="space",
                                                   cls=type(self).__name__))
                space = self.convert_search_space(space)
            else:
                # Flatten to support nested dicts
                space = flatten_dict(space, "/")

        # Deprecate: 1.5
        if isinstance(space, list):
            logger.warning(
                "Passing lists of `param.suggest_*()` calls to OptunaSearch "
                "as a search space is deprecated and will be removed in "
                "a future release of Ray. Please pass a dict mapping "
                "to `optuna.distributions` objects instead.")

        self._space = space

        self._points_to_evaluate = points_to_evaluate or []
        self._evaluated_rewards = evaluated_rewards

        self._study_name = "optuna"  # Fixed study name for in-memory storage

        if sampler and seed:
            logger.warning(
                "You passed an initialized sampler to `OptunaSearch`. The "
                "`seed` parameter has to be passed to the sampler directly "
                "and will be ignored.")

        self._sampler = sampler or ot.samplers.TPESampler(seed=seed)

        assert isinstance(self._sampler, BaseSampler), \
            "You can only pass an instance of `optuna.samplers.BaseSampler` " \
            "as a sampler to `OptunaSearcher`."

        self._ot_trials = {}
        self._ot_study = None
        if self._space:
            self._setup_study(mode)
Example #6
 def _priority(self, checkpoint):
     result = flatten_dict(checkpoint.result)
     priority = result[self._checkpoint_score_attr]
     if self._checkpoint_score_desc:
         priority = -priority
     return (
         not is_nan(priority),
         priority if not is_nan(priority) else 0,
         checkpoint.order,
     )
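
The three-part tuple makes NaN scores rank below every real score (the leading boolean is False for NaN) while still being comparable. A standalone illustration of the resulting ordering (the `is_nan` helper here is a simple stand-in):

import math

def is_nan(x):
    return isinstance(x, float) and math.isnan(x)

scores = [(0.7, 1), (float("nan"), 2), (0.9, 3)]  # (score, insertion order)
priorities = [(not is_nan(s), s if not is_nan(s) else 0, order) for s, order in scores]
print(sorted(priorities))
# [(False, 0, 2), (True, 0.7, 1), (True, 0.9, 3)]  -- NaN sorts lowest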
Example #7
    def convert_search_space(spec: Dict) -> Parameter:
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        if grid_vars:
            raise ValueError(
                "Grid search parameters cannot be automatically converted "
                "to a Nevergrad search space.")

        # Flatten and resolve again after checking for grid search.
        spec = flatten_dict(spec, prevent_delimiter=True)
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        def resolve_value(domain: Domain) -> Parameter:
            sampler = domain.get_sampler()
            if isinstance(sampler, Quantized):
                logger.warning(
                    "Nevergrad does not support quantization. Dropped quantization."
                )
                sampler = sampler.get_sampler()

            if isinstance(domain, Float):
                if isinstance(sampler, LogUniform):
                    return ng.p.Log(lower=domain.lower,
                                    upper=domain.upper,
                                    exponent=sampler.base)
                return ng.p.Scalar(lower=domain.lower, upper=domain.upper)

            elif isinstance(domain, Integer):
                if isinstance(sampler, LogUniform):
                    return ng.p.Log(
                        lower=domain.lower,
                        upper=domain.upper - 1,  # Upper bound exclusive
                        exponent=sampler.base,
                    ).set_integer_casting()
                return ng.p.Scalar(
                    lower=domain.lower,
                    upper=domain.upper - 1,  # Upper bound exclusive
                ).set_integer_casting()

            elif isinstance(domain, Categorical):
                return ng.p.Choice(choices=domain.categories)

            raise ValueError("Nevergrad does not support parameters of type "
                             "`{}` with samplers of type `{}`".format(
                                 type(domain).__name__,
                                 type(domain.sampler).__name__))

        # Parameter name is e.g. "a/b/c" for nested dicts
        space = {
            "/".join(path): resolve_value(domain)
            for path, domain in domain_vars
        }

        return ng.p.Dict(**space)
Example #8
 def normalize(self, config) -> Dict:
     ''' normalize each dimension in config to [0,1]
     '''
     config_norm = {}
     for key, value in flatten_dict(config).items():
         if key in self.space:
             # domain: sample.Categorical/Integer/Float/Function
             domain = self.space[key]
             if not callable(getattr(domain, 'get_sampler', None)):
                 config_norm[key] = value
             else:
                 if isinstance(domain, sample.Categorical):
                     # normalize categorical
                     if key in self._ordered_cat_hp:
                         l, d = self._ordered_cat_hp[key]
                         config_norm[key] = (d[value] + 0.5) / len(
                             l)  # center
                     elif key in self._ordered_choice_hp:
                         l, d = self._ordered_choice_hp[key]
                         config_norm[key] = (d[value] + 0.5) / len(
                             l)  # center
                     elif key in self.incumbent:
                         config_norm[key] = self.incumbent[
                             key] if value == self.best_config[key] else (
                                 self.incumbent[key] +
                                 1) % self._unordered_cat_hp[key]
                     else:
                         config_norm[key] = 0
                     continue
                 # Uniform/LogUniform/Normal/Base
                 sampler = domain.get_sampler()
                 if isinstance(sampler, sample.Quantized):
                     # sampler is sample.Quantized
                     sampler = sampler.get_sampler()
                 if str(sampler) == 'LogUniform':
                     config_norm[key] = np.log(
                         value / domain.lower) / np.log(
                             domain.upper / domain.lower)
                 elif str(sampler) == 'Uniform':
                     config_norm[key] = (value - domain.lower) / (
                         domain.upper - domain.lower)
                 elif str(sampler) == 'Normal':
                     # N(mean, sd) -> N(0,1)
                     config_norm[key] = (value - sampler.mean) / sampler.sd
                 else:
                     # TODO? elif str(sampler) == 'Base': # sample.Function._CallSampler
                     # e.g., {test: sample_from(lambda spec: randn(10, 2).sample() * 0.01)}
                     config_norm[key] = value
                     # print(key+"'s value is not normalized")
         else:  # prune_attr
             config_norm[key] = value
     return config_norm
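
For a LogUniform domain the snippet maps a value to log(value / lower) / log(upper / lower), i.e. 0 at the lower bound and 1 at the upper bound. A quick numeric check of that formula (standalone, not using the class above):

import numpy as np

lower, upper = 1e-4, 1e-1   # e.g. a log-uniform learning-rate range
for value in (1e-4, 1e-3, 1e-2, 1e-1):
    norm = np.log(value / lower) / np.log(upper / lower)
    print(value, round(float(norm), 3))   # 0.0, 0.333, 0.667, 1.0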
Example #9
    def complete_config(self,
                        partial_config: Dict,
                        lower: Optional[Dict] = None,
                        upper: Optional[Dict] = None) -> Dict:
        ''' generate a complete config from the partial config input
        add minimal resource to config if available
        '''
        if self._reset_times and partial_config == self.init_config:
            # not the first time to complete init_config, use random gaussian
            normalized = self.normalize(partial_config)
            for key in normalized:
                # don't change unordered cat choice
                if key not in self._unordered_cat_hp:
                    if upper and lower:
                        u, l = upper[key], lower[key]
                        gauss_std = u - l or self.STEPSIZE
                        # allowed bound
                        u += self.STEPSIZE
                        l -= self.STEPSIZE
                    elif key in self._bounded_keys:
                        u, l, gauss_std = 1, 0, 1.0
                    else:
                        u, l, gauss_std = np.Inf, -np.Inf, 1.0
                    if key in self._bounded_keys:
                        u = min(u, 1)
                        l = max(l, 0)
                    delta = self.rand_vector_gaussian(1, gauss_std)[0]
                    normalized[key] = max(l, min(u, normalized[key] + delta))
            # use best config for unordered cat choice
            config = self.denormalize(normalized)
        else:
            # first time init_config, or other configs, take as is
            config = partial_config.copy()
        if partial_config == self.init_config: self._reset_times += 1
        config = flatten_dict(config)
        for key, value in self.space.items():
            if key not in config:
                config[key] = value
        # logger.debug(f'before random {config}')
        for _, generated in generate_variants({'config': config}):
            config = generated['config']
            break
        # logger.debug(f'after random {config}')

        if self._resource:
            config[self.prune_attr] = self.min_resource
        return unflatten_dict(config)
Example #10
    def on_result(self, result):
        tmp = result.copy()
        elim = [
            "config", "pid", "timestamp", "done", TIME_TOTAL_S,
            TRAINING_ITERATION
        ]
        for k in elim:
            if k in tmp:
                del tmp[k]

        step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
        tmp.update({TRAINING_ITERATION: result[TRAINING_ITERATION]})
        for key, val in flatten_dict(tmp).items():
            if isinstance(val, tuple(VALID_SUMMARY_TYPES)):
                self._file_writer.add_scalar("/".join(["ray", "tune", key]),
                                             val, step)

        self._file_writer.flush()
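
Flattening the result dict is what turns nested metrics into slash-separated TensorBoard tags. A small standalone sketch of the tag names this loop produces (the nested `result` below is made up; `flatten_dict` is assumed importable from `ray.tune.utils`):

from ray.tune.utils import flatten_dict

result = {"training_iteration": 12,
          "eval": {"accuracy": 0.91, "loss": 0.24}}
for key, val in flatten_dict(result).items():
    if isinstance(val, (int, float)):
        print("/".join(["ray", "tune", key]), val)
# ray/tune/training_iteration 12
# ray/tune/eval/accuracy 0.91
# ray/tune/eval/loss 0.24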
Example #11
 def set_search_properties(
     self,
     metric: Optional[str] = None,
     mode: Optional[str] = None,
     config: Optional[Dict] = None,
 ) -> bool:
     if metric:
         self._metric = metric
     if mode:
         assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
         self._mode = mode
         if mode == "max":
             self.metric_op = -1.0
         elif mode == "min":
             self.metric_op = 1.0
     if config:
         self.space = config
         self._space = flatten_dict(self.space)
         self._init_search()
     return True
Example #12
    def convert_search_space(spec: Dict) -> List[Dict]:
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        if grid_vars:
            raise ValueError(
                "Grid search parameters cannot be automatically converted "
                "to a Dragonfly search space.")

        # Flatten and resolve again after checking for grid search.
        spec = flatten_dict(spec, prevent_delimiter=True)
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        def resolve_value(par: str, domain: Domain) -> Dict:
            sampler = domain.get_sampler()
            if isinstance(sampler, Quantized):
                logger.warning(
                    "Dragonfly search does not support quantization. "
                    "Dropped quantization.")
                sampler = sampler.get_sampler()

            if isinstance(domain, Float):
                if domain.sampler is not None:
                    logger.warning(
                        "Dragonfly does not support specific sampling methods."
                        " The {} sampler will be dropped.".format(sampler))
                return {
                    "name": par,
                    "type": "float",
                    "min": domain.lower,
                    "max": domain.upper,
                }

            raise ValueError("Dragonfly does not support parameters of type "
                             "`{}`".format(type(domain).__name__))

        # Parameter name is e.g. "a/b/c" for nested dicts
        space = [
            resolve_value("/".join(path), domain)
            for path, domain in domain_vars
        ]
        return space
Example #13
 def config_signature(self, config) -> tuple:
     ''' return the signature tuple of a config
     '''
     config = flatten_dict(config)
     value_list = []
     for key in self._space_keys:
         if key in config:
             value = config[key]
             if key == self.prune_attr:
                 value_list.append(value)
             # else key must be in self.space
             # get rid of list type or constant,
             # e.g., "eval_metric": ["logloss", "error"]
             elif callable(getattr(self.space[key], 'sample', None)):
                 if isinstance(self.space[key], sample.Integer):
                     value_list.append(int(round(value)))
                 else:
                     value_list.append(value)
         else:
             value_list.append(None)
     return tuple(value_list)
Example #14
 def test_output_type(self):
     in_ = OrderedDict({"a": {"b": 1}, "c": {"d": 2}, "e": 3})
     out = flatten_dict(in_)
     assert type(in_) is type(out)
Example #15
    def __init__(
        self,
        init_config: dict,
        metric: Optional[str] = None,
        mode: Optional[str] = None,
        space: Optional[dict] = None,
        resource_attr: Optional[str] = None,
        min_resource: Optional[float] = None,
        max_resource: Optional[float] = None,
        resource_multiple_factor: Optional[float] = 4,
        cost_attr: Optional[str] = "time_total_s",
        seed: Optional[int] = 20,
    ):
        """Constructor.

        Args:
            init_config: a dictionary of a partial or full initial config,
                e.g., from a subset of controlled dimensions
                to the initial low-cost values.
                E.g., {'epochs': 1}.
            metric: A string of the metric name to optimize for.
            mode: A string in ['min', 'max'] to specify the objective as
                minimization or maximization.
            space: A dictionary to specify the search space.
            resource_attr: A string to specify the resource dimension and the best
                performance is assumed to be at the max_resource.
            min_resource: A float of the minimal resource to use for the resource_attr.
            max_resource: A float of the maximal resource to use for the resource_attr.
            resource_multiple_factor: A float of the multiplicative factor
                used for increasing resource.
            cost_attr: A string of the attribute used for cost.
            seed: An integer of the random seed.
        """
        if mode:
            assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
        else:
            mode = "min"

        super(FLOW2, self).__init__(metric=metric, mode=mode)
        # internally minimizes, so "max" => -1
        if mode == "max":
            self.metric_op = -1.0
        elif mode == "min":
            self.metric_op = 1.0
        self.space = space or {}
        self._space = flatten_dict(self.space, prevent_delimiter=True)
        self._random = np.random.RandomState(seed)
        self.rs_random = _BackwardsCompatibleNumpyRng(seed + 19823)
        self.seed = seed
        self.init_config = init_config
        self.best_config = flatten_dict(init_config)
        self.resource_attr = resource_attr
        self.min_resource = min_resource
        self.resource_multiple_factor = resource_multiple_factor or 4
        self.cost_attr = cost_attr
        self.max_resource = max_resource
        self._resource = None
        self._step_lb = np.Inf
        if space is not None:
            self._init_search()
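
A minimal instantiation sketch for this constructor, assuming FLAML's Tune-style sampling helpers and a FLOW2 import path along these lines (both are assumptions; the space and config values are illustrative):

from flaml import tune  # FLAML re-exports Ray-Tune-style sampling helpers
from flaml.searcher.flow2 import FLOW2  # import path may differ across FLAML versions

flow2 = FLOW2(
    init_config={"lr": 1e-3, "batch_size": 32},   # low-cost starting point
    metric="val_loss",
    mode="min",
    space={"lr": tune.loguniform(1e-5, 1e-1),
           "batch_size": tune.choice([16, 32, 64, 128])},
)
suggestion = flow2.suggest("trial_0")   # propose a config near init_config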
Example #16
    def convert_search_space(spec: Dict) -> List[Tuple]:
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        if not domain_vars and not grid_vars:
            return []

        if grid_vars:
            raise ValueError(
                "Grid search parameters cannot be automatically converted "
                "to an Optuna search space.")

        # Flatten and resolve again after checking for grid search.
        spec = flatten_dict(spec, prevent_delimiter=True)
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        def resolve_value(par: str, domain: Domain) -> Tuple:
            quantize = None

            sampler = domain.get_sampler()
            if isinstance(sampler, Quantized):
                quantize = sampler.q
                sampler = sampler.sampler

            if isinstance(domain, Float):
                if isinstance(sampler, LogUniform):
                    if quantize:
                        logger.warning(
                            "Optuna does not support both quantization and "
                            "sampling from LogUniform. Dropped quantization.")
                    return param.suggest_loguniform(par, domain.lower,
                                                    domain.upper)
                elif isinstance(sampler, Uniform):
                    if quantize:
                        return param.suggest_discrete_uniform(
                            par, domain.lower, domain.upper, quantize)
                    return param.suggest_uniform(par, domain.lower,
                                                 domain.upper)
            elif isinstance(domain, Integer):
                if isinstance(sampler, LogUniform):
                    if quantize:
                        logger.warning(
                            "Optuna does not support both quantization and "
                            "sampling from LogUniform. Dropped quantization.")
                    return param.suggest_int(par,
                                             domain.lower,
                                             domain.upper,
                                             log=True)
                elif isinstance(sampler, Uniform):
                    return param.suggest_int(par,
                                             domain.lower,
                                             domain.upper,
                                             step=quantize or 1)
            elif isinstance(domain, Categorical):
                if isinstance(sampler, Uniform):
                    return param.suggest_categorical(par, domain.categories)

            raise ValueError(
                "Optuna search does not support parameters of type "
                "`{}` with samplers of type `{}`".format(
                    type(domain).__name__,
                    type(domain.sampler).__name__))

        # Parameter name is e.g. "a/b/c" for nested dicts
        values = [
            resolve_value("/".join(path), domain)
            for path, domain in domain_vars
        ]

        return values
Example #17
    def convert_search_space(spec: Dict) -> Dict[str, Any]:
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        if not domain_vars and not grid_vars:
            return {}

        if grid_vars:
            raise ValueError(
                "Grid search parameters cannot be automatically converted "
                "to an Optuna search space.")

        # Flatten and resolve again after checking for grid search.
        spec = flatten_dict(spec, prevent_delimiter=True)
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        def resolve_value(domain: Domain) -> ot.distributions.BaseDistribution:
            quantize = None

            sampler = domain.get_sampler()
            if isinstance(sampler, Quantized):
                quantize = sampler.q
                sampler = sampler.sampler
                if isinstance(sampler, LogUniform):
                    logger.warning(
                        "Optuna does not handle quantization in loguniform "
                        "sampling. The parameter will be passed but it will "
                        "probably be ignored.")

            if isinstance(domain, Float):
                if isinstance(sampler, LogUniform):
                    if quantize:
                        logger.warning(
                            "Optuna does not support both quantization and "
                            "sampling from LogUniform. Dropped quantization.")
                    return ot.distributions.LogUniformDistribution(
                        domain.lower, domain.upper)

                elif isinstance(sampler, Uniform):
                    if quantize:
                        return ot.distributions.DiscreteUniformDistribution(
                            domain.lower, domain.upper, quantize)
                    return ot.distributions.UniformDistribution(
                        domain.lower, domain.upper)

            elif isinstance(domain, Integer):
                if isinstance(sampler, LogUniform):
                    return ot.distributions.IntLogUniformDistribution(
                        domain.lower, domain.upper - 1, step=quantize or 1)
                elif isinstance(sampler, Uniform):
                    # Upper bound should be inclusive for quantization and
                    # exclusive otherwise
                    return ot.distributions.IntUniformDistribution(
                        domain.lower,
                        domain.upper - int(bool(not quantize)),
                        step=quantize or 1)
            elif isinstance(domain, Categorical):
                if isinstance(sampler, Uniform):
                    return ot.distributions.CategoricalDistribution(
                        domain.categories)

            raise ValueError(
                "Optuna search does not support parameters of type "
                "`{}` with samplers of type `{}`".format(
                    type(domain).__name__,
                    type(domain.sampler).__name__))

        # Parameter name is e.g. "a/b/c" for nested dicts
        values = {
            "/".join(path): resolve_value(domain)
            for path, domain in domain_vars
        }

        return values
Example #18
    def __init__(self,
                 init_config: dict,
                 metric: Optional[str] = None,
                 mode: Optional[str] = None,
                 cat_hp_cost: Optional[dict] = None,
                 space: Optional[dict] = None,
                 prune_attr: Optional[str] = None,
                 min_resource: Optional[float] = None,
                 max_resource: Optional[float] = None,
                 resource_multiple_factor: Optional[float] = 4,
                 seed: Optional[int] = 20):
        '''Constructor

        Args:
            init_config: a dictionary from a subset of controlled dimensions
                to the initial low-cost values. e.g. {'epochs':1}
            metric: A string of the metric name to optimize for.
            mode: A string in ['min', 'max'] to specify the objective as
                minimization or maximization.
            cat_hp_cost: A dictionary from a subset of categorical dimensions
                to the relative cost of each choice. 
                e.g.,
                
                .. code-block:: python

                    {'tree_method': [1, 1, 2]}
                
                i.e., the relative cost of the 
                three choices of 'tree_method' is 1, 1 and 2 respectively.
            space: A dictionary to specify the search space.
            prune_attr: A string of the attribute used for pruning. 
                Not necessarily in space.
                When prune_attr is in space, it is a hyperparameter, e.g., 
                    'n_iters', and the best value is unknown.
                When prune_attr is not in space, it is a resource dimension, 
                    e.g., 'sample_size', and the peak performance is assumed
                    to be at the max_resource.
            min_resource: A float of the minimal resource to use for the 
                prune_attr; only valid if prune_attr is not in space.
            max_resource: A float of the maximal resource to use for the 
                prune_attr; only valid if prune_attr is not in space.
            resource_multiple_factor: A float of the multiplicative factor
                used for increasing resource.
            seed: An integer of the random seed.
        '''
        if mode:
            assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
        else:
            mode = "min"

        super(FLOW2, self).__init__(metric=metric, mode=mode)
        # internally minimizes, so "max" => -1
        if mode == "max":
            self.metric_op = -1.
        elif mode == "min":
            self.metric_op = 1.
        self.space = space or {}
        self.space = flatten_dict(self.space, prevent_delimiter=True)
        self._random = np.random.RandomState(seed)
        self._seed = seed
        if not init_config:
            logger.warning(
                "No init config given to FLOW2. Using random initial config."
                "For cost-frugal search, "
                "consider providing init values for cost-related hps via "
                "'init_config'.")
        self.init_config = init_config
        self.best_config = flatten_dict(init_config)
        self.cat_hp_cost = cat_hp_cost
        self.prune_attr = prune_attr
        self.min_resource = min_resource
        self.resource_multiple_factor = resource_multiple_factor or 4
        self.max_resource = max_resource
        self._resource = None
        self._step_lb = np.Inf
        if space:
            self._init_search()
Example #19
    def convert_search_space(spec: Dict) -> "ConfigSpace.ConfigurationSpace":
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        if grid_vars:
            raise ValueError(
                "Grid search parameters cannot be automatically converted "
                "to a TuneBOHB search space.")

        # Flatten and resolve again after checking for grid search.
        spec = flatten_dict(spec, prevent_delimiter=True)
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        def resolve_value(
                par: str,
                domain: Domain) -> ConfigSpace.hyperparameters.Hyperparameter:
            quantize = None

            sampler = domain.get_sampler()
            if isinstance(sampler, Quantized):
                quantize = sampler.q
                sampler = sampler.sampler

            if isinstance(domain, Float):
                if isinstance(sampler, LogUniform):
                    lower = domain.lower
                    upper = domain.upper
                    if quantize:
                        lower = math.ceil(domain.lower / quantize) * quantize
                        upper = math.floor(domain.upper / quantize) * quantize
                    return ConfigSpace.UniformFloatHyperparameter(par,
                                                                  lower=lower,
                                                                  upper=upper,
                                                                  q=quantize,
                                                                  log=True)
                elif isinstance(sampler, Uniform):
                    lower = domain.lower
                    upper = domain.upper
                    if quantize:
                        lower = math.ceil(domain.lower / quantize) * quantize
                        upper = math.floor(domain.upper / quantize) * quantize
                    return ConfigSpace.UniformFloatHyperparameter(par,
                                                                  lower=lower,
                                                                  upper=upper,
                                                                  q=quantize,
                                                                  log=False)
                elif isinstance(sampler, Normal):
                    return ConfigSpace.hyperparameters.\
                       NormalFloatHyperparameter(
                        par,
                        mu=sampler.mean,
                        sigma=sampler.sd,
                        q=quantize,
                        log=False)

            elif isinstance(domain, Integer):
                if isinstance(sampler, LogUniform):
                    lower = domain.lower
                    upper = domain.upper
                    if quantize:
                        lower = math.ceil(domain.lower / quantize) * quantize
                        upper = math.floor(domain.upper / quantize) * quantize
                    else:
                        # Tune search space integers are exclusive
                        upper -= 1
                    return ConfigSpace.UniformIntegerHyperparameter(
                        par, lower=lower, upper=upper, q=quantize, log=True)
                elif isinstance(sampler, Uniform):
                    lower = domain.lower
                    upper = domain.upper
                    if quantize:
                        lower = math.ceil(domain.lower / quantize) * quantize
                        upper = math.floor(domain.upper / quantize) * quantize
                    else:
                        # Tune search space integers are exclusive
                        upper -= 1
                    return ConfigSpace.UniformIntegerHyperparameter(
                        par, lower=lower, upper=upper, q=quantize, log=False)

            elif isinstance(domain, Categorical):
                if isinstance(sampler, Uniform):
                    return ConfigSpace.CategoricalHyperparameter(
                        par, choices=domain.categories)

            raise ValueError("TuneBOHB does not support parameters of type "
                             "`{}` with samplers of type `{}`".format(
                                 type(domain).__name__,
                                 type(domain.sampler).__name__))

        cs = ConfigSpace.ConfigurationSpace()
        for path, domain in domain_vars:
            par = "/".join(str(p) for p in path)
            value = resolve_value(par, domain)
            cs.add_hyperparameter(value)

        return cs
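
The quantize branch rounds both bounds inward to the nearest multiple of q before building the hyperparameter. A quick check of that arithmetic with made-up numbers:

import math

lower, upper, q = 0.13, 0.97, 0.05
lower_q = math.ceil(lower / q) * q    # ceil(2.6) * 0.05  -> 0.15
upper_q = math.floor(upper / q) * q   # floor(19.4) * 0.05 -> 0.95
print(round(lower_q, 2), round(upper_q, 2))   # 0.15 0.95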
Example #20
    def convert_search_space(spec: Dict):
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        if grid_vars:
            raise ValueError(
                "Grid search parameters cannot be automatically converted "
                "to an Ax search space.")

        # Flatten and resolve again after checking for grid search.
        spec = flatten_dict(spec, prevent_delimiter=True)
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        def resolve_value(par, domain):
            sampler = domain.get_sampler()
            if isinstance(sampler, Quantized):
                logger.warning(
                    "AxSearch does not support quantization. Dropped quantization."
                )
                sampler = sampler.sampler

            if isinstance(domain, Float):
                if isinstance(sampler, LogUniform):
                    return {
                        "name": par,
                        "type": "range",
                        "bounds": [domain.lower, domain.upper],
                        "value_type": "float",
                        "log_scale": True,
                    }
                elif isinstance(sampler, Uniform):
                    return {
                        "name": par,
                        "type": "range",
                        "bounds": [domain.lower, domain.upper],
                        "value_type": "float",
                        "log_scale": False,
                    }
            elif isinstance(domain, Integer):
                if isinstance(sampler, LogUniform):
                    return {
                        "name": par,
                        "type": "range",
                        "bounds": [domain.lower, domain.upper - 1],
                        "value_type": "int",
                        "log_scale": True,
                    }
                elif isinstance(sampler, Uniform):
                    return {
                        "name": par,
                        "type": "range",
                        "bounds": [domain.lower, domain.upper - 1],
                        "value_type": "int",
                        "log_scale": False,
                    }
            elif isinstance(domain, Categorical):
                if isinstance(sampler, Uniform):
                    return {
                        "name": par,
                        "type": "choice",
                        "values": domain.categories
                    }

            raise ValueError("AxSearch does not support parameters of type "
                             "`{}` with samplers of type `{}`".format(
                                 type(domain).__name__,
                                 type(domain.sampler).__name__))

        # Fixed vars
        fixed_values = [{
            "name": "/".join(path),
            "type": "fixed",
            "value": val
        } for path, val in resolved_vars]

        # Parameter name is e.g. "a/b/c" for nested dicts
        resolved_values = [
            resolve_value("/".join(path), domain)
            for path, domain in domain_vars
        ]

        return fixed_values + resolved_values
Example #21
 def _priority(self, checkpoint):
     result = flatten_dict(checkpoint.result)
     priority = result[self._checkpoint_score_attr]
     return -priority if self._checkpoint_score_desc else priority
Example #22
 def test_one_level_nested(self):
     ori_in = OrderedDict({"a": {"b": 1}, "c": {"d": 2}, "e": 3})
     in_ = copy.deepcopy(ori_in)
     result = flatten_dict(in_)
     assert in_ == ori_in
     assert result == {"a/b": 1, "c/d": 2, "e": 3}