def test_normalize():
    """Round-trip transform/inverse_transform under the "normalize" transformer."""
    dims = [(0.0, 1.0), (-5, 5), ("a", "b", "c"),
            (1.0, 5.0, "log-uniform"), ("e", "f")]
    search_space = Space(dims)
    search_space.set_transformer("normalize")

    # Every coordinate sits at its dimension's lower bound, so the
    # normalized image must be all zeros.
    points = [[0., -5, 'a', 1., 'e']]
    expected = np.zeros((1, 5))

    assert_array_equal(search_space.transform(points), expected)
    assert_array_equal(search_space.inverse_transform(expected), points)
    assert_array_equal(
        search_space.inverse_transform(search_space.transform(points)),
        points)
def test_normalize_types():
    """Normalized round-trips must restore each dimension's native Python type."""
    search_space = Space([(0.0, 1.0), Integer(-5, 5, dtype=int), (True, False)])
    search_space.set_transformer("normalize")

    # All-lower-bound point maps to the zero vector under "normalize".
    points = [[0., -5, False]]
    expected = np.zeros((1, 3))

    assert_array_equal(search_space.transform(points), expected)
    assert_array_equal(search_space.inverse_transform(expected), points)
    assert_array_equal(
        search_space.inverse_transform(search_space.transform(points)),
        points)

    # Types survive the inverse transform: float, int, bool respectively.
    restored = search_space.inverse_transform(expected)[0]
    assert isinstance(restored[0], float)
    assert isinstance(restored[1], int)
    assert isinstance(restored[2], (np.bool_, bool))
def generate(self, dimensions, n_samples, random_state=None):
    """Creates latin hypercube samples with maxpro criterion.

    Parameters
    ----------
    dimensions : list, shape (n_dims,)
        List of search space dimensions.
        Each search dimension can be defined either as

        - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
          dimensions),
        - a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
          dimensions),
        - as a list of categories (for `Categorical` dimensions), or
        - an instance of a `Dimension` object (`Real`, `Integer` or
          `Categorical`).

    n_samples : int
        The order of the LHS sequence. Defines the number of samples.

    random_state : int, RandomState instance, or None (default)
        Set random state to something other than None for reproducible
        results.

    Returns
    -------
    np.array, shape=(n_dim, n_samples)
        LHS set
    """
    rng = check_random_state(random_state)
    space = Space(dimensions)
    # Remember the space's transformer so it can be restored on exit.
    transformer = space.get_transformer()
    n_dim = space.n_dims
    space.set_transformer("normalize")
    h = self._lhs_normalized(n_dim, n_samples, rng)
    self.num_pts = n_samples
    self.dim = n_dim
    if self.use_gradient:
        print('Using gradient descent')
        # One (0, 1) box constraint per coordinate of the flattened design
        # (n_dim * n_samples entries; previously computed via
        # len(dimensions), which is only correct when `dimensions` is a
        # plain list).
        bounds = [(0, 1)] * (n_dim * n_samples)
        h_opt = minimize(self.maxpro_criter, h,
                         jac=self.maxpro_grad, bounds=bounds)
        h_opt = h_opt['x'].reshape(n_samples, n_dim)
    else:
        print('Using naive method')
        # Track the best design seen so far; seed it with the initial draw
        # so `h_opt` is defined even when self.iterations == 0 (previously
        # it could be referenced while unbound).
        best = 1e+6
        h_opt = h.copy()
        for _ in range(self.iterations):
            # BUG FIX: the old code passed `i * rng` as the random state,
            # which raises TypeError (int * RandomState is undefined).
            # Passing `rng` itself is correct: its internal state advances
            # on every call, so each iteration draws a fresh candidate.
            h = self._lhs_normalized(n_dim, n_samples, rng)
            criter = self.maxpro_criter(h)
            if best > criter:
                best = criter
                h_opt = h.copy()
    # Map the winning normalized design back to the original space and
    # restore the caller-visible transformer state.
    h_opt = space.inverse_transform(h_opt)
    space.set_transformer(transformer)
    return h_opt
def test_set_get_transformer():
    """set_transformer/get_transformer round-trip plus per-type override."""
    search_space = Space([(0.0, 1.0), (-5, 5), ("a", "b", "c"),
                          (1.0, 5.0, "log-uniform"), ("e", "f")])

    # Defaults: numeric dimensions are identity-transformed, categorical
    # dimensions are one-hot encoded.
    assert_array_equal(
        ["identity", "identity", "onehot", "identity", "onehot"],
        search_space.get_transformer())

    search_space.set_transformer("normalize")
    current = search_space.get_transformer()
    assert_array_equal(["normalize"] * 5, current)

    # Feeding the transformer list we just read straight back must be a
    # no-op.
    search_space.set_transformer(current)
    assert_array_equal(current, search_space.get_transformer())

    # Per-type override: label-encode only the Categorical dimensions.
    search_space.set_transformer_by_type("label", Categorical)
    assert search_space.dimensions[2].transform(["a"]) == [0]
class Transformer:
    """
    Transformer stage of MOFA

    Parameters
    ----------
    roi_space: `orion.algo.space.Space`
        Parameter region-of-interest as orion.algo.space.Space instance
    n_levels: int
        Number of levels
    """

    def __init__(self, roi_space: Space, n_levels: int):
        self.space = roi_space
        self.n_levels = n_levels
        # Mirror of the ROI as a scikit-optimize space, normalized so that
        # every parameter is mapped into [0, 1].
        self.sk_space = SkSpace(fix_shape_intervals(roi_space.interval()))
        self.sk_space.set_transformer("normalize")

    def generate_olh_perf_table(self, trials: list[Trial]) -> pd.DataFrame:
        """
        Build an orthogonal Latin hypercube (OLH) performance table from trial parameters

        Parameters
        ----------
        trials: list of orion.core.worker.trial.Trial objects
            Completed trials
        """
        # TODO: deal with categoricals
        # Only completed trials contribute rows to the table.
        finished = [trial for trial in trials if trial.status == "completed"]

        rows = []
        objectives = []
        for trial in finished:
            trial_params = flatten(trial.params)
            # Keep only the parameters that belong to self.space.
            rows.append([trial_params[name] for name in self.space])
            objectives.append(trial.objective.value)

        # Clip to the space bounds, then normalize into [0, 1].
        lower = [bound[0] for bound in self.sk_space.bounds]
        upper = [bound[1] for bound in self.sk_space.bounds]
        clipped = np.clip(np.array(rows), a_min=lower, a_max=upper)
        normalized = self.sk_space.transform(clipped)

        # Append the objective as the final column.
        data = np.hstack([normalized, np.array(objectives)[:, None]])
        columns = list(self.space.keys()) + ["objective"]
        return pd.DataFrame(data, columns=columns)

    @staticmethod
    def _collapse_levels(olh_perf_table: pd.DataFrame,
                         n_levels: int) -> pd.DataFrame:
        """
        Collapses the levels of an orthogonal Latin hypercube parameter table

        Parameters
        ----------
        olh_perf_table: `pandas.DataFrame`
            array of normalized trial parameters as floats in range [0., 1.]
        n_levels: int
            number of levels in the OLH table

        Returns
        -------
        A ``pandas.DataFrame`` of the orthogonal array table
        """
        # Map each normalized value in (0, 1] to an integer level 1..n_levels
        # via the ceiling; the objective column (last) is left untouched.
        levels = np.ceil(olh_perf_table.iloc[:, :-1] * n_levels).astype(int)
        # ceil(0 * n_levels) == 0 falls outside the level range — bump it up.
        levels[levels.iloc[:, :] == 0] = 1
        levels["objective"] = olh_perf_table["objective"]
        return levels

    def generate_oa_table(self, trials: list[Trial]) -> pd.DataFrame:
        """
        Generates the orthogonal array performance table

        Parameters
        ----------
        trials: list of `orion.core.worker.trial.Trial` objects
            Completed trials

        Returns
        -------
        A ``pandas.DataFrame`` of the OA table with parameters and trial
        objective values
        """
        return self._collapse_levels(self.generate_olh_perf_table(trials),
                                     self.n_levels)