Example #1
def estimate_local_lipschitz_constant(surrogate_model, X_busy, n_sample=500):
    '''
    Estimate local Lipschitz constant from a surrogate model
    
    Returns
    -------
    L: np.array ~ (n_busy, n_obj)
    '''
    assert isinstance(surrogate_model, GaussianProcess)
    n_var, n_obj = surrogate_model.n_var, surrogate_model.n_obj
    n_busy = len(X_busy)

    L = np.zeros((n_busy, n_obj))
    for i in range(n_obj):
        # length scale of the kernel's stationary component defines the local neighborhood size
        length_scale = surrogate_model.gps[i].kernel_.get_params()['k1__k2__length_scale']
        lower_bounds = np.maximum(X_busy - 0.5 * length_scale, 0.0)  # clip local boxes to the unit cube
        upper_bounds = np.minimum(X_busy + 0.5 * length_scale, 1.0)
        
        for j in range(n_busy):
            bounds = np.vstack([lower_bounds[j], upper_bounds[j]]).T
            X_sample = lower_bounds[j] + lhs(n_var, n_sample) * (upper_bounds[j] - lower_bounds[j])
            dF_i_norm_sample = calc_dF_norm(X_sample, surrogate_model)[:, i]
            x0 = X_sample[np.argmin(dF_i_norm_sample)]
            res = minimize(calc_dF_norm_per_obj, x0, method='L-BFGS-B', bounds=bounds, args=(surrogate_model, i))
            L[j, i] = -float(res.fun)  # negate: the minimized objective is the negated gradient norm
            if L[j, i] < 1e-7:  # degenerate estimate, fall back to a conservative default
                L[j, i] = 10.0

    return L
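The clipping step above defines a per-point search box of half-width 0.5 * length_scale around each busy point, truncated to the unit cube. A self-contained sketch of just that step, with assumed toy values:

import numpy as np

# Assumed toy setup: three busy points in a 2-D unit cube, isotropic length scale 0.4.
X_busy = np.array([[0.1, 0.9], [0.5, 0.5], [0.95, 0.2]])
length_scale = 0.4

lower_bounds = np.maximum(X_busy - 0.5 * length_scale, 0.0)  # clip each box to [0, 1]^2
upper_bounds = np.minimum(X_busy + 0.5 * length_scale, 1.0)
print(np.hstack([lower_bounds, upper_bounds]))  # one [lower, upper] box per busy point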
Example #2
    def _solve(self, X, Y, batch_size):
        '''
        Solve the multi-objective problem by multiple scalarized single-objective solvers.
        '''
        # generate scalarization weights
        weights = np.random.random((batch_size, self.problem.n_obj))
        weights /= np.expand_dims(np.sum(weights, axis=1), 1)

        # initial solutions
        X = np.vstack([X, lhs(X.shape[1], batch_size)])
        F = self.problem.evaluate(X, return_values_of=['F'])

        # optimization
        xs, ys = [], []
        queue = Queue()  # multiprocessing Queue/Process assumed imported at module level
        n_active_process = 0
        for i in range(batch_size):
            x0 = X[np.argmin(augmented_tchebicheff(F, weights[i]))]
            Process(target=optimization,
                    args=(self.problem, x0, weights[i], queue)).start()
            n_active_process += 1
            if n_active_process >= self.n_process:
                x, y = queue.get()
                xs.append(x)
                ys.append(y)
                n_active_process -= 1

        # gather result
        for _ in range(n_active_process):
            x, y = queue.get()
            xs.append(x)
            ys.append(y)

        return np.array(xs), np.array(ys)
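The helper augmented_tchebicheff is not shown in this listing; a minimal sketch of the standard augmented Tchebycheff scalarization it presumably implements (the ideal point z and augmentation factor rho here are assumptions):

import numpy as np

def augmented_tchebicheff_sketch(F, w, z=0.0, rho=0.05):
    # Standard augmented Tchebycheff form; the project's own helper may differ in details.
    d = w * np.abs(F - z)                      # weighted distances to the ideal point z
    return d.max(axis=1) + rho * d.sum(axis=1)

F = np.random.random((10, 2))                  # dummy objective values
w = np.array([0.3, 0.7])
print(np.argmin(augmented_tchebicheff_sketch(F, w)))  # index of the best scalarized point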
Example #3
def generate_random_initial_samples(problem, n_sample):
    '''
    Generate feasible random initial samples
    Input:
        problem: the optimization problem
        n_sample: number of random initial samples
    Output:
        X: initial samples (design parameters)
    '''
    X_feasible = np.zeros((0, problem.n_var))

    max_iter = 1000
    iter_count = 0

    while len(X_feasible) < n_sample and iter_count < max_iter:
        X = lhs(problem.n_var, n_sample)  # TODO: support other types of initialization
        X = problem.xl + X * (problem.xu - problem.xl)  # rescale from unit cube to design bounds
        feasible = problem.evaluate_feasible(X)  # NOTE: assume constraint evaluation is fast
        X_feasible = np.vstack([X_feasible, X[feasible]])
        iter_count += 1

    if len(X_feasible) < n_sample:
        raise Exception(f'failed to generate enough feasible samples, {len(X_feasible)}/{n_sample} generated')

    X = X_feasible[:n_sample]
    return problem.transformation.undo(X)
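lhs draws samples in the unit cube, and the line after it maps them to the problem's box bounds. A self-contained sketch of that mapping, assuming pyDOE's lhs (the project may ship its own wrapper with the same signature):

import numpy as np
from pyDOE import lhs  # assumption: any LHS returning samples in [0, 1]^n works here

xl, xu = np.array([-1.0, 0.0]), np.array([1.0, 2.0])  # toy box bounds
X = lhs(2, 8)                   # 8 samples in [0, 1]^2, shape (8, 2)
X = xl + X * (xu - xl)          # rescale to [xl, xu]
print(X.min(axis=0), X.max(axis=0))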
Example #4
    def _solve(self, X, Y, batch_size):

        # initialize population
        if len(X) < self.pop_size:
            X = np.vstack([X, lhs(X.shape[1], self.pop_size - len(X))])
        elif len(X) > self.pop_size:
            sorted_indices = NonDominatedSorting().do(Y)
            X = X[sorted_indices[:self.pop_size]]
        self.algo.initialization.sampling = X

        res = minimize(self.problem, self.algo, ('n_gen', self.n_gen))

        X_candidate, Y_candidate, algo = res.pop.get('X'), res.pop.get('F'), res.algorithm
        G = Y_candidate  # acquisition values of the candidates

        _, curr_pset_idx = find_pareto_front(Y, return_index=True)
        curr_pset = X[curr_pset_idx]

        # scalarized acquisition value of each candidate w.r.t. its reference direction
        G_s = algo._decomposition.do(G, weights=self.ref_dirs, ideal_point=algo.ideal_point)

        # build candidate pool Q
        Q_x, Q_dir, Q_g, Q_gs = [], [], [], []
        X_added = curr_pset.copy()
        for x, ref_dir, g, gs in zip(X_candidate, self.ref_dirs, G, G_s):
            if (x != X_added).any(axis=1).all():  # keep x only if it differs from every added point
                Q_x.append(x)
                Q_dir.append(ref_dir)
                Q_g.append(g)
                Q_gs.append(gs)
                X_added = np.vstack([X_added, x])
        Q_x, Q_dir, Q_g, Q_gs = np.array(Q_x), np.array(Q_dir), np.array(Q_g), np.array(Q_gs)

        min_batch_size = min(batch_size, len(Q_x))  # in case Q is smaller than batch size

        if min_batch_size == 0:
            indices = np.random.choice(len(X_candidate), batch_size, replace=False)
            return X_candidate[indices], Y_candidate[indices]

        # k-means clustering on X with weight vectors (min_batch_size clusters, so KMeans
        # never asks for more clusters than there are candidates in Q)
        labels = KMeans(n_clusters=min_batch_size).fit_predict(np.column_stack([Q_x, Q_dir]))

        # select point in each cluster with lowest scalarized acquisition value
        X_candidate, Y_candidate = [], []
        for i in range(min_batch_size):
            indices = np.where(labels == i)[0]
            top_idx = indices[np.argmin(Q_gs[indices])]
            X_candidate.append(Q_x[top_idx])
            Y_candidate.append(Q_g[top_idx])

        return np.array(X_candidate), np.array(Y_candidate)
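The membership test used when building Q is easy to misread: a candidate is kept only if it differs from every already-added row in at least one coordinate. A toy check:

import numpy as np

X_added = np.array([[0.1, 0.2], [0.5, 0.5]])
print((np.array([0.5, 0.5]) != X_added).any(axis=1).all())  # False: equals the second row, skipped
print((np.array([0.5, 0.6]) != X_added).any(axis=1).all())  # True: differs from both rows, kept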
Example #5
    def _solve(self, X, Y, batch_size):

        # initialize population
        X = np.vstack([X, lhs(X.shape[1], batch_size)])
        self.algo.initialization.sampling = X

        res = minimize(self.problem, self.algo, ('n_gen', self.n_gen))

        return res.pop.get('X'), res.pop.get('F')
Example #6
    def _solve(self, X, Y, batch_size):

        # initialize population
        X = np.vstack([X, lhs(X.shape[1], batch_size)])
        self.algo.initialization.sampling = X

        res = minimize(self.problem, self.algo)
        opt_X, opt_F = res.pop.get('X'), res.pop.get('F')
        opt_idx = np.argsort(opt_F.flatten())[:batch_size]  # single-objective: keep the batch_size best

        return opt_X[opt_idx], opt_F[opt_idx]
Example #7
    def _fit(self, X, Y):
        X, Y = self.normalization.do(x=X, y=Y)

        self.thetas, self.Ws, self.bs, self.sf2s = [], [], [], []
        n_sample = X.shape[0]
        gps, n_var, nu = self.surrogate_model.gps, self.surrogate_model.n_var, self.surrogate_model.nu

        for i, gp in enumerate(gps):
            gp.fit(X, Y[:, i])

            # recover kernel hyperparameters: length scales, signal variance, noise variance
            ell = np.exp(gp.kernel_.theta[1:-1])
            sf2 = np.exp(2 * gp.kernel_.theta[0])
            sn2 = np.exp(2 * gp.kernel_.theta[-1])

            # draw spectral frequencies W and phases b for M random Fourier features
            sw1, sw2 = lhs(n_var, self.M), lhs(n_var, self.M)
            if nu > 0:
                # Matern kernel: spectral density is a multivariate t distribution
                W = np.tile(1. / ell, (self.M, 1)) * norm.ppf(sw1) * np.sqrt(nu / chi2.ppf(sw2, df=nu))
            else:
                W = np.random.uniform(size=(self.M, n_var)) * np.tile(1. / ell, (self.M, 1))
            b = 2 * np.pi * lhs(1, self.M)
            # feature matrix phi (M x n_sample) and Bayesian linear regression posterior
            phi = np.sqrt(2. * sf2 / self.M) * np.cos(W @ X.T + np.tile(b, (1, n_sample)))
            A = phi @ phi.T + sn2 * np.eye(self.M)
            invcholA = LA.inv(LA.cholesky(A))
            invA = invcholA.T @ invcholA
            mu_theta = invA @ phi @ Y[:, i]
            if self.mean_sample:
                theta = mu_theta  # use the posterior mean instead of a random draw
            else:
                cov_theta = sn2 * invA
                cov_theta = 0.5 * (cov_theta + cov_theta.T)  # symmetrize for numerical stability
                theta = mu_theta + LA.cholesky(cov_theta) @ np.random.standard_normal(self.M)

            self.thetas.append(theta.copy())
            self.Ws.append(W.copy())
            self.bs.append(b.copy())
            self.sf2s.append(sf2)
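After _fit, each stored tuple (theta, W, b, sf2) defines one posterior sample as a linear model over random cosine features. A self-contained sketch of evaluating such a sample (all values below are stand-ins, not fitted ones):

import numpy as np

M, n_var, sf2 = 100, 3, 1.0
W = np.random.standard_normal((M, n_var))  # stand-in spectral frequencies
b = 2 * np.pi * np.random.random((M, 1))   # stand-in phases
theta = np.random.standard_normal(M)       # stand-in sampled weights

X_test = np.random.random((5, n_var))
phi = np.sqrt(2. * sf2 / M) * np.cos(W @ X_test.T + b)  # (M, 5) feature matrix
print(theta @ phi)  # values of the sampled function at the 5 test points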
Example #8
    def _solve(self, X, Y, batch_size):

        # initialize population
        X = np.vstack([X, lhs(X.shape[1], batch_size)])
        F = self.problem.evaluate(X)
        x0 = X[np.argmin(F)]  # best evaluated point as the CMA-ES starting point

        algo = CMAESAlgo(x0=x0)
        res = minimize(self.problem, algo)
        opt_X, opt_F = res.pop.get('X'), res.pop.get('F')
        opt_idx = np.argsort(opt_F.flatten())[:batch_size]

        return opt_X[opt_idx], opt_F[opt_idx]
Example #9
    def _solve(self, X, Y, batch_size):
        # initialize population
        X = np.vstack([X, lhs(X.shape[1], batch_size)])
        self.algo.initialization.sampling = X

        res = minimize_ea(self.problem, self.algo, ('n_gen', self.n_gen))

        X_candidate, Y_candidate = res.pop.get('X'), res.pop.get('F')
        algo = res.algorithm

        curr_pfront = find_pareto_front(Y)
        ref_point = np.max(np.vstack([Y, Y_candidate]), axis=0)
        X_candidate, Y_candidate, _ = algo.propose_next_batch(curr_pfront, ref_point, batch_size)

        return X_candidate, Y_candidate
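find_pareto_front is a project helper; a minimal sketch of what it computes for a minimization problem (the project version additionally supports return_index):

import numpy as np

def pareto_front_sketch(Y):
    # Keep every point that no other point dominates (minimization).
    n = len(Y)
    keep = [i for i in range(n)
            if not any(np.all(Y[j] <= Y[i]) and np.any(Y[j] < Y[i])
                       for j in range(n) if j != i)]
    return Y[keep]

Y = np.random.random((20, 2))
print(pareto_front_sketch(Y))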
Example #10
def estimate_lipschitz_constant(surrogate_model, n_sample=500):
    '''
    Estimate Lipschitz constant from a surrogate model
    
    Returns
    -------
    L: np.array ~ (n_obj,)
    '''
    n_var, n_obj = surrogate_model.n_var, surrogate_model.n_obj

    # find a good x0
    X_sample = lhs(n_var, n_sample)
    dF_norm_sample = calc_dF_norm(X_sample, surrogate_model)

    # optimize for L for each objective
    bounds = np.vstack([np.zeros(n_var), np.ones(n_var)]).T
    L = np.zeros(n_obj)
    for i in range(n_obj):
        x0 = X_sample[np.argmin(dF_norm_sample[:, i])]
        res = minimize(calc_dF_norm_per_obj, x0, method='L-BFGS-B', bounds=bounds, args=(surrogate_model, i))
        L[i] = -float(res.fun)  # negate: the minimized objective is the negated gradient norm
        if L[i] < 1e-7:  # degenerate estimate, fall back to a conservative default
            L[i] = 10.0

    return L
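For intuition, the same minimize-the-negated-gradient-norm trick on a toy function with a known Lipschitz constant: f(x) = sin(3x) on [0, 1] has max |f'(x)| = 3, attained at x = 0.

import numpy as np
from scipy.optimize import minimize

# Stand-in for calc_dF_norm_per_obj: the negated gradient norm of f(x) = sin(3x).
neg_grad_norm = lambda x: -np.abs(3. * np.cos(3. * x[0]))

res = minimize(neg_grad_norm, x0=[0.5], method='L-BFGS-B', bounds=[(0., 1.)])
print(-res.fun)  # ~3.0, the Lipschitz constant of sin(3x) on [0, 1]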