Example #1
from numpy.testing import assert_array_equal

from skopt.space import Space, Categorical


def test_set_get_transformer():
    # mixed search space: real, integer, categorical, log-uniform real,
    # and a second categorical dimension
    space = Space([(0.0, 1.0), (-5, 5),
                   ("a", "b", "c"), (1.0, 5.0, "log-uniform"), ("e", "f")])

    transformer = space.get_transformer()
    assert_array_equal(["identity", "identity", "onehot",
                        "identity", "onehot"], transformer)
    space.set_transformer("normalize")
    transformer = space.get_transformer()
    assert_array_equal(["normalize"] * 5, transformer)
    space.set_transformer(transformer)
    assert_array_equal(transformer, space.get_transformer())

    space.set_transformer_by_type("label", Categorical)
    assert space.dimensions[2].transform(["a"]) == [0]
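
As a quick illustration of what the "normalize" transformer tested above does in practice, here is a minimal sketch using the same skopt.space.Space API (the sample point values are made up for illustration): it warps a point into the unit hypercube and back.

from skopt.space import Space

space = Space([(0.0, 1.0), (-5, 5), ("a", "b", "c")])
space.set_transformer("normalize")

# Every dimension is warped into [0, 1]: reals and integers are rescaled,
# categories are label-encoded and then rescaled.
point = [[0.5, 0, "b"]]
warped = space.transform(point)
print(warped)                            # approximately [[0.5, 0.5, 0.5]]
print(space.inverse_transform(warped))   # [[0.5, 0, 'b']] -- round-trips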
Example #2
    def generate(self, dimensions, n_samples, random_state=None):
        """Creates Latin hypercube samples with the MaxPro criterion.

        Parameters
        ----------
        dimensions : list, shape (n_dims,)
            List of search space dimensions.
            Each search dimension can be defined either as

            - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
              dimensions),
            - a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
              dimensions),
            - as a list of categories (for `Categorical` dimensions), or
            - an instance of a `Dimension` object (`Real`, `Integer` or
              `Categorical`).

        n_samples : int
            The order of the LHS sequence. Defines the number of samples.
        random_state : int, RandomState instance, or None (default)
            Set random state to something other than None for reproducible
            results.

        Returns
        -------
        list of lists, shape (n_samples, n_dims)
            The LHS set, mapped back to the original search space.
        """
        rng = check_random_state(random_state)
        space = Space(dimensions)
        # Remember the current transformer so it can be restored after
        # sampling in the normalized unit hypercube.
        transformer = space.get_transformer()
        n_dim = space.n_dims
        space.set_transformer("normalize")
        h = self._lhs_normalized(n_dim, n_samples, rng)

        self.num_pts = n_samples
        self.dim = n_dim
        if self.use_gradient:
            print('Using gradient descent')
            # Refine the initial design by minimizing the MaxPro criterion
            # (scipy.optimize.minimize defaults to L-BFGS-B when bounds are
            # given), keeping every coordinate of the flattened design in
            # the unit hypercube.
            bounds = [(0, 1)] * len(dimensions) * self.num_pts
            result = minimize(self.maxpro_criter,
                              h.ravel(),
                              jac=self.maxpro_grad,
                              bounds=bounds)
            h_opt = result.x.reshape(n_samples, n_dim)
        else:
            print('Using naive method')
            # Draw several independent random LHS designs and keep the one
            # with the smallest MaxPro criterion.
            best = float("inf")
            for _ in range(self.iterations):
                h = self._lhs_normalized(n_dim, n_samples, rng)
                criter = self.maxpro_criter(h)
                if criter < best:
                    best = criter
                    h_opt = h.copy()
        # Map the optimized design from the unit hypercube back to the
        # original search space and restore the previous transformer.
        h_opt = space.inverse_transform(h_opt)
        space.set_transformer(transformer)
        return h_opt
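
The generate method above relies on self.maxpro_criter, whose implementation is not shown. For reference, below is a minimal self-contained sketch of the standard maximum-projection (MaxPro) criterion on the unit hypercube; the class's actual maxpro_criter may differ in scaling or vectorization, so treat this as an illustration rather than the implementation used above.

import numpy as np


def maxpro_criterion(h, n_samples=None, n_dim=None):
    # MaxPro criterion for a design `h` in [0, 1]^p (smaller is better).
    # `h` may be flat (as scipy.optimize.minimize passes it) or shaped
    # (n_samples, n_dim); the flat case needs both shape arguments.
    h = np.asarray(h, dtype=float)
    if h.ndim == 1:
        h = h.reshape(n_samples, n_dim)
    n, p = h.shape
    total = 0.0
    for i in range(n):
        for j in range(i + 1, n):
            # Penalize point pairs that nearly coincide in any 1-D projection.
            total += 1.0 / np.prod((h[i] - h[j]) ** 2)
    return (total / (n * (n - 1) / 2.0)) ** (1.0 / p)


# Example: score a random 10-point design in 3 dimensions.
rng = np.random.default_rng(0)
print(maxpro_criterion(rng.random((10, 3))))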