Code Example #1
File: glisp.py Project: j-cap/Data-Driven-Basics
    def facquisition_pref(xx, X, N, delta_E, dF, beta, rbf, epsil,
                          theta, sepvalue, ibest, acquisition_method,
                          isUnknownFeasibilityConstrained, isUnknownSatisfactionConstrained,
                          Feasibility_unkn, SatConst_unkn, delta_G, delta_S,
                          iw_ibest, maxevals):
        # Acquisition function to minimize to get next sample

        epsilth = epsil * theta

        if acquisition_method == 1:
            v = rbf(X[0:N, :], xx, epsilth)
            fhat = v.ravel().dot(beta.ravel())  # beta holds the RBF weight vector W

            d = vecsum((X[0:N, ] - xx) ** 2, axis=-1)

            ii = where(d < 1e-12)
            if ii[0].size > 0:
                dhat = 0.0
                if isUnknownFeasibilityConstrained:
                    Ghat = Feasibility_unkn[ii]
                else:
                    Ghat = 1
                if isUnknownSatisfactionConstrained:
                    Shat = SatConst_unkn[ii]
                else:
                    Shat = 1
            else:
                w = vecexp(-d) / d
                sw = sum(w)
                if maxevals <= 30:
                    dhat = delta_E * atan(1.0 / sum(1.0 / d))  # for comparison; used in the original GLISp and when N_max <= 30 in C-GLISp
                else:
                    dhat = delta_E * ((1 - N / maxevals) * atan((1 / sum(1. / d)) / iw_ibest) + N / maxevals * atan(1 / sum(1. / d)))  # used in C-GLISp

                if isUnknownFeasibilityConstrained:
                    Ghat = vecsum(Feasibility_unkn[0:N].T * w) / sw
                else:
                    Ghat = 1

                if isUnknownSatisfactionConstrained:
                    Shat = vecsum(SatConst_unkn[0:N].T * w) / sw
                else:
                    Shat = 1

            # f = fhat / dF - dhat  # for comparison, used in GLISp
            f = fhat / dF - dhat + delta_G * (1 - Ghat) + delta_S * (1 - Shat)  # used in C-GLISp

        elif acquisition_method == 2:

            v = rbf(X[0:N, :], xx, epsilth) - rbf(X[0:N, :], X[ibest, :], epsilth)
            PHIbeta = v.ravel().dot(beta.ravel())

            lm1 = max(PHIbeta + sepvalue, 0.0)
            l0 = max(0, PHIbeta - sepvalue, -PHIbeta - sepvalue)
            l1 = max(sepvalue - PHIbeta, 0.0)
            c0 = 1.0
            cm1 = 1.0
            c1 = 1.0
            em1 = exp(-cm1 * lm1)
            f = -em1 / (em1 + exp(-c0 * l0) + exp(-c1 * l1))

        return f
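
For intuition, the pure IDW exploration term computed above can be exercised on its own. A minimal sketch, where the squared-distance vector d is made up for illustration and delta_E plays the same role as in facquisition_pref:

import numpy as np
from math import atan

d = np.array([0.8, 2.5, 0.3, 1.1])  # hypothetical squared distances to past samples
delta_E = 1.0

w = np.exp(-d) / d                            # IDW weights, as in facquisition_pref
sw = np.sum(w)
dhat = delta_E * atan(1.0 / np.sum(1.0 / d))  # pure IDW exploration term (GLISp form)
print(dhat, sw)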
Code Example #2
File: glisp.py Project: j-cap/Data-Driven-Basics
    def get_delta_adpt(X, constraint_set, delta_const_default):
        # Adaptively rescale delta for unknown constraints based on how well
        # the IDW interpolant predicts the constraint labels (leave-one-out)
        ind = constraint_set.shape[0]
        sqr_error_feas = zeros((ind, 1))
        for i in range(ind):
            xx = X[i, :]
            Xi = vstack((X[0:i, :], X[i + 1:ind, :]))
            constraint_set_i = vstack((constraint_set[0:i, ], constraint_set[i + 1:ind, ]))
            Feas_xx = constraint_set[i]
            d = vecsum((Xi - xx) ** 2, axis=-1)
            w = vecexp(-d) / d
            sw = sum(w)
            ghat = vecsum(constraint_set_i.T * w) / sw
            sqr_error_feas[i] = (ghat - Feas_xx) ** 2

        std_feas = (sum(sqr_error_feas) / (ind - 1)) ** 0.5
        delta_adpt = (1 - std_feas) * delta_const_default

        return delta_adpt
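
A minimal sketch of calling this estimator, assuming get_delta_adpt from the snippet above is in scope together with the numpy aliases it uses (zeros, vstack, vecsum = numpy.sum, vecexp = numpy.exp); the samples and 0/1 feasibility labels below are made up:

import numpy as np

rng = np.random.default_rng(0)
X = rng.random((8, 2))                  # 8 made-up samples in 2 dimensions
feas = rng.integers(0, 2, size=(8, 1))  # made-up 0/1 feasibility labels
delta_G = get_delta_adpt(X, feas, 1.0)  # adaptive delta_G with default delta = 1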
Code Example #3
def set(nvars):
    """
    Generate default problem structure for IDW-RBF Global Optimization.
    
    problem = idwgopt_default.set(n) generates a default problem structure
    for an optimization problem with n variables.
     
    (C) 2019 A. Bemporad, July 6, 2019
    """

    from numpy import zeros, ones
    from numpy import sum as vecsum

    problem = {
        "f": "[set cost function here]",  # cost function to minimize
        "lb": -1 * ones((nvars, 1)),  # lower bounds on the optimization variables
        "ub": 1 * ones((nvars, 1)),  # upper bounds on the optimization variables
        "maxevals": 20,  # maximum number of function evaluations
        "alpha": 1,  # weight on function uncertainty variance measured by IDW
        "delta": 0.5,  # weight on distance from previous samples
        "nsamp": 2 * nvars,  # number of initial samples
        "useRBF": 1,  # 1 = use RBFs, 0 = use IDW interpolation
        "rbf": lambda x1, x2: 1 / (1 + 0.25 * vecsum((x1 - x2)**2)),
        # inverse quadratic RBF function (only used if useRBF=1)
        "scalevars": 1,  # scale problem variables
        "svdtol": 1e-6,  # tolerance used to discard small singular values
        "Aineq": zeros((0, nvars)),  # matrix A defining linear inequality constraints
        "bineq": zeros((0, 1)),  # right-hand side of constraints A*x <= b
        "g": 0,  # constraint function. Example: problem["g"] = lambda x: x[0]**2 + x[1]**2 - 1
        "shrink_range": 1,  # if 0, disable shrinking lb and ub to bounding box of feasible set
        "constraint_penalty": 1000,  # penalty term on violation of linear inequality and nonlinear constraints
        "feasible_sampling": False,  # if True, initial samples are forced to be feasible
        "globoptsol": "direct",  # nonlinear solver used during acquisition.
        # interfaced solvers are:
        #   "direct" DIRECT from the NLopt toolbox (nlopt.readthedocs.io)
        #   "pswarm" PySwarm solver (pythonhosted.org/pyswarm/)
        "display": 0,  # verbosity level (0 = minimum)
        "PSOiters": 500,  # number of iterations in PSO solver
        "PSOswarmsize": 20,  # swarm size in PSO solver
        "epsDeltaF": 1e-4,  # minimum value used to scale the IDW distance function
    }

    return problem
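
Typical usage is to take the defaults and override a few fields. A minimal sketch, assuming the function lives in a module named idwgopt_default as the docstring indicates:

import idwgopt_default

problem = idwgopt_default.set(3)                # defaults for a 3-variable problem
problem["f"] = lambda x: float((x ** 2).sum())  # plug in the cost to minimize
problem["maxevals"] = 50                        # enlarge the evaluation budget
problem["globoptsol"] = "pswarm"                # use PySwarm instead of DIRECT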
Code Example #4
    if use_nl_constraints and benchmark == "camelsixhumps":
        #problem["g"] = lambda x: array([(x[0]-1)**2+x[1]**2-.25,
        #       (x[0]-0.5)**2+(x[1]-0.5)**2-.25])
        problem["g"] = lambda x: array([x[0]**2 + (x[1] + 0.1)**2 - .5])

    problem["lb"] = lb
    problem["ub"] = ub
    problem["maxevals"] = maxevals
    problem["sepvalue"] = 1. / maxevals

    pref_fun = glisp_function(fun, comparetol)  # preference function object
    pref = lambda x, y: pref_fun.eval(x, y)
    problem["pref"] = pref

    epsil = 1.
    problem["rbf"] = lambda x1, x2, epsil: 1. / (1. + epsil**2 * vecsum(
        (x1 - x2)**2, axis=-1))  # inverse quadratic
    #problem["rbf"] = lambda x1,x2,epsil: exp(-(epsil**2*vecsum((x1-x2)**2,axis=-1)) # Gaussian RBF
    #problem["rbf"] = lambda x1,x2,epsil: sqrt((1.+epsil**2*vecsum((x1-x2)**2,axis=-1)) # multiquadric
    problem["epsil"] = epsil

    problem["RBFcalibrate"] = RBFcalibrate
    problem["thetas"] = logspace(-1, 1, 10, False)

    problem["delta"] = delta

    problem["nsamp"] = nsamp
    problem["svdtol"] = 1e-6
    #problem["globoptsol"] = "direct"
    problem["globoptsol"] = "pswarm"
    problem["display"] = 1
Code Example #5
File: glisp.py Project: j-cap/Data-Driven-Basics
def fun_rbf(x1, x2):
    return 1.0 / (1.0 + epsil ** 2 * vecsum((x1 - x2) ** 2, axis=-1))
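
The commented-out alternatives in Code Example #4 are the Gaussian and multiquadric kernels. With the parentheses balanced, equivalent closures over epsil (captured from the enclosing scope, exactly as in fun_rbf above) would read:

from numpy import exp, sqrt
from numpy import sum as vecsum

def fun_rbf_gaussian(x1, x2):
    return exp(-epsil ** 2 * vecsum((x1 - x2) ** 2, axis=-1))

def fun_rbf_multiquadric(x1, x2):
    return sqrt(1.0 + epsil ** 2 * vecsum((x1 - x2) ** 2, axis=-1))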
Code Example #6
File: glisp.py Project: j-cap/Data-Driven-Basics
def solve(prob):
    """
    Active preference learning to solve global optimization
    problem using radial basis functions (RBFs) to fit a surrogate of the
    latent function to minimize by preference queries. The acquisition
    used to generate new samples is based either on inverse distance
    weighting (IDW) or probability of improvement.

    (C) 2019 A. Bemporad

    sol = glisp.solve(prob) solves the active preference learning
    problem

    find x such that pref(x, y) <= 0 for all y in X,
    X = {x: lb <= x <= ub, A*x <= b, g(x) <= 0}

    where pref(x,y) = -1 if x "better than" y
                       0 if x "as good as" y
                       1 if x "worse than" y

    A special case is to solve the global optimization problem

    min  f(x)
    s.t. lb <= x <= ub, A*x <= b, g(x) <= 0

    based only on comparisons between function values

    pref(x1,x2) = -1 if f(x1) <= f(x2) - tol
                =  0 if |f(x1)-f(x2)| <= tol
                =  1 if f(x1) >= f(x2) + tol

    where tol is the threshold deciding the outcome of the comparison,
    i.e., the comparison is "even" if |f(x1)-f(x2)| <= tol

    The algorithm is described in [2] and builds upon the global optimization
    algorithm described in [1]. It is particularly useful when f(x) is not
    defined, and only comparisons between samples are available.

    The default problem structure is

    prob = glisp.default(nvars)

    where nvars = dimension of optimization vector x.

    See function glis_default for a description of all other available
    options.

    The following options differ from glis:
        prob["pref"] is the preference function pref(x1,x2)

        prob["sepvalue"] is the value used in constructing the surrogate function fhat:
            fhat(x1)<=fhat(x2)-sepvalue if pref(x1,x2) = -1
            |fhat(x1)-fhat(x2)|<=sepvalue if pref(x1,x2) = 0.

        prob["epsil"] epsil parameter used in defining RBF

        prob["rbf"] the RBF function also includes epsil as a parameter:
            rbf = fun(x1,x2,epsil)

        prob["RBFcalibrate"] if true, recalibrate scaling parameter theta of RBF
            during iterations, i.e., fun(x1,x2,epsil*theta) is
            used as RBF.

        prob["thetas"] array of theta values to test during calibration.
             At least one element in thetas must be 1.

        prob["RBFcalibrationSteps"] iterations at which RBF is recalibrated.
             Values smaller than prob["nsamp"] are ignored.

        prob["comparetol"] threshold to decide outcome of comparison during
            recalibration in cross validation, comparison is "even" if
            |fhat(x1)-fhat(x2)|<=comparetol, fhat=surrogate function.

        prob["acquisition_method"] acquisition method
            1 = scaled surrogate - delta * IDW
            2 = probability of improvement

        prog["f"]         not used
        prob["svdtol"]    not used
        prob["alpha"]     not used
        prob["epsDeltaF"] redundant if epsDeltaF<=1
        prob["useRBF"]    forced to true.

    The output argument 'out' is a structure reporting the following information:

    out["X"]:     trace of all samples x generated by the algorithm
    out["I"]:     results of comparisons: out.X(out.I(j,1)) "better than" out.X(out.I(j,2))
    out["Ieq"]:   results of comparisons: out.X(out.Ieq(j,1)) "as good as" out.X(out.Ieq(j,2))
    out["W"]:     final set of weights
    out["M"]:     final RBF matrix
    out["xopt"]:  best sample found during search
    out["theta"]: scaling parameter multiplied by epsil in final RBF matrix

    out["time_iter"], out["time_opt_acquisition"], out["time_fit_surrogate"], out["time_f_eval"]
    store timing recorded during the execution of the algorithm.

    Required Python packages:
        pyDOE:     https://pythonhosted.org/pyDOE/
        nlopt:     https://nlopt.readthedocs.io (required only if DIRECT solver is used)
        pyswarm:   https://pythonhosted.org/pyswarm/ (required only if PSO solver is used)
        qpsolvers: https://github.com/stephane-caron/qpsolvers
        cvxopt:    https://cvxopt.org

    [1] A. Bemporad, "Global optimization via inverse distance weighting and
        radial basis functions," Computational Optimization and Applications,
        vol. 77, pp. 571–595, 2020.

    [2] A. Bemporad and D. Piga, “Active preference learning based on radial basis functions,”
        Machine Learning, vol. 110, no. 2, pp. 417–448, 2021,
        Available on arXiv at http://arxiv.org/abs/1909.13049.

%%%%%%%%%%%%%%%%%%%%%%
    % (C-GLISp)
    % Note: Add features to handle unknown constraints (M. Zhu, June, 07, 2021)
    %       Known constraints will be handled via penalty functions
    %
    % Following are the new parameters introduced in C-GLISp
    % opts["isUnknownFeasibilityConstrained"]: if true, unknown feasibility constraints are involved
    % opts["isUnknownSatisfactionConstrained"]: if true, unknown satisfaction constraints are involed
    % delta_E: delta for te pure IDW exploration term, \delta_E in the paper
    % delta_G_default: delta for feasibility constraints, \delta_{G,default} in the paper
    % delta_S_default: delta for satisfaction constraints, \delta_{S,default} in the paper
    % Feasibility_unkn: feasibility labels for unknown feasibility constraints
    % SatConst_unkn: satisfaction labels for unknown satisfactory constraints
    """

    import glis.glis_init as glis_init

    from pyswarm import pso # https://pythonhosted.org/pyswarm/
    from qpsolvers import solve_qp
    from numpy import zeros, ones, diag, isin
    from numpy import where, maximum, dot, array, vstack, empty
    from numpy import sum as vecsum
    from numpy import exp as vecexp
    from math import atan, pi, exp
    import contextlib
    import io
    import time

    def get_weights(X, I, Ieq, M, n, ibest, sepvalue):
        # Fit RBF satisfying comparison constraints at sampled points
        #
        # optimization vector x=[beta;epsil] where:
        #    beta  = rbf coefficients
        #    epsil = vector of slack vars, one per constraint

        normalize = 0

        m = I.shape[0]
        meq = Ieq.shape[0]
        A = zeros([m + 2 * meq, n + m + meq])
        b = zeros([m + 2 * meq, 1])
        for k in range(0, m):
            i = I[k][0]
            j = I[k][1]
            # f(x(i))<f(x(j))
            # sum_h(beta(h)*phi(x(i,:),x(h,:))<=sum_h(beta(h)*phi(x(j,:),x(h,:))+eps_k-sepvalue
            A[k, 0:n] = M[i, 0:n] - M[j, 0:n]
            A[k, n + k] = -1.0
            b[k] = -sepvalue

        # |f(x(i))-f(x(j))|<=comparetol
        # --> f(x(i))<=f(x(j))+comparetol+epsil
        # --> f(x(j))<=f(x(i))+comparetol+epsil
        # sum_h(beta(h)*phi(x(i,:),x(h,:))<=sum_h(beta(h)*phi(x(j,:),x(h,:))+sepvalue+epsil
        # sum_h(beta(h)*phi(x(j,:),x(h,:))<=sum_h(beta(h)*phi(x(i,:),x(h,:))+sepvalue+epsil
        for k in range(0, meq):
            i = Ieq[k][0]
            j = Ieq[k][1]
            A[m + 2 * k, 0:n] = M[i, 0:n] - M[j, 0:n]
            A[m + 2 * k, n + m + k] = -1.0
            b[m + 2 * k] = sepvalue
            A[m + 2 * k + 1, 0:n] = M[j, 0:n] - M[i, 0:n]
            A[m + 2 * k + 1, n + m + k] = -1.0
            b[m + 2 * k + 1] = sepvalue

        if normalize:
            # Add constraints to avoid trivial solution surrogate=flat:
            #    sum_h(beta.*phi(x(ibest,:),x(h,:))) = 0
            #    sum_h(beta.*phi(x(ii,:),x(h,:))) = 1

            # Look for sample where function is worse,i.e., f(ii) is largest
            ii = I[0][1]
            for k in range(0, m):
                if I[k][0] == ii:
                    ii = I[k][1]
            Aeq = zeros([2, n + m + meq])
            beq = zeros([2, 1])
            Aeq[0, 0:n] = M[ibest, 0:n]
            Aeq[1, 0:n] = M[ii, 0:n]
            beq[0] = 0.0
            beq[1] = 1.0
        else:
            Aeq = zeros([0, n + m + meq])
            beq = zeros([0, 1])

        c = zeros([n + m + meq, 1])
        # penalize more violations involving zbest
        for i in range(0, m):
            if (I[i][0] == ibest or I[i][1] == ibest):
                c[n + i] = 10.0
            else:
                c[n + i] = 1.0

        for i in range(0, meq):
            if (Ieq[i][0] == ibest or Ieq[i][1] == ibest):
                c[n + m + i] = 10.0
            else:
                c[n + m + i] = 1.0

        # Solve QP problem
        q = zeros(n + m + meq)
        q[0:n] = 1.0e-6
        Q = diag(q)
        if beq.size == 0:
            # x_sol = solve_qp(sparse.csc_matrix(Q), c, sparse.csc_matrix(A), b, solver="osqp")
            x_sol = solve_qp(Q, c, A, b, solver="cvxopt")
        else:
            # x_sol = solve_qp(sparse.csc_matrix(Q), c, sparse.csc_matrix(A), b,
            #    sparse.csc_matrix(Aeq), beq, solver="osqp")
            x_sol = solve_qp(Q, c, A, b, Aeq, beq, solver="cvxopt")

        try:
            beta = x_sol[0:n].reshape(n, 1)
        except Exception:
            # x_sol may be None if the QP solver failed; retry with a
            # better-conditioned problem
            q[0:n] = 1.0e-3
            Q = diag(q)
            x_sol = solve_qp(Q, c, A, b, Aeq, beq, solver="cvxopt")
            beta = x_sol[0:n].reshape(n, 1)

        return beta

    def get_delta_adpt(X, constraint_set, delta_const_default):
        # Adaptively rescale delta for unknown constraints based on how well
        # the IDW interpolant predicts the constraint labels (leave-one-out)
        ind = constraint_set.shape[0]
        sqr_error_feas = zeros((ind, 1))
        for i in range(ind):
            xx = X[i, :]
            Xi = vstack((X[0:i, :], X[i + 1:ind, :]))
            constraint_set_i = vstack((constraint_set[0:i, ], constraint_set[i + 1:ind, ]))
            Feas_xx = constraint_set[i]
            d = vecsum((Xi - xx) ** 2, axis=-1)
            w = vecexp(-d) / d
            sw = sum(w)
            ghat = vecsum(constraint_set_i.T * w) / sw
            sqr_error_feas[i] = (ghat - Feas_xx) ** 2

        std_feas = (sum(sqr_error_feas) / (ind - 1)) ** 0.5
        delta_adpt = (1 - std_feas) * delta_const_default

        return delta_adpt

    def facquisition_pref(xx, X, N, delta_E, dF, beta, rbf, epsil,
                          theta, sepvalue, ibest, acquisition_method,
                          isUnknownFeasibilityConstrained, isUnknownSatisfactionConstrained,
                          Feasibility_unkn, SatConst_unkn, delta_G, delta_S,
                          iw_ibest, maxevals):
        # Acquisition function to minimize to get next sample

        epsilth = epsil * theta

        if acquisition_method == 1:
            v = rbf(X[0:N, :], xx, epsilth)
            fhat = v.ravel().dot(beta.ravel())  # beta holds the RBF weight vector W

            d = vecsum((X[0:N, ] - xx) ** 2, axis=-1)

            ii = where(d < 1e-12)
            if ii[0].size > 0:
                dhat = 0.0
                if isUnknownFeasibilityConstrained:
                    Ghat = Feasibility_unkn[ii]
                else:
                    Ghat = 1
                if isUnknownSatisfactionConstrained:
                    Shat = SatConst_unkn[ii]
                else:
                    Shat = 1
            else:
                w = vecexp(-d) / d
                sw = sum(w)
                if maxevals <= 30:
                    dhat = delta_E * atan(1.0 / sum(1.0 / d))  # for comparison; used in the original GLISp and when N_max <= 30 in C-GLISp
                else:
                    dhat = delta_E * ((1 - N / maxevals) * atan((1 / sum(1. / d)) / iw_ibest) + N / maxevals * atan(1 / sum(1. / d)))  # used in C-GLISp

                if isUnknownFeasibilityConstrained:
                    Ghat = vecsum(Feasibility_unkn[0:N].T * w) / sw
                else:
                    Ghat = 1

                if isUnknownSatisfactionConstrained:
                    Shat = vecsum(SatConst_unkn[0:N].T * w) / sw
                else:
                    Shat = 1

            # f = fhat / dF - dhat  # for comparison, used in GLISp
            f = fhat / dF - dhat + delta_G * (1 - Ghat) + delta_S * (1 - Shat)  # used in C-GLISp

        elif acquisition_method == 2:

            v = rbf(X[0:N, :], xx, epsilth) - rbf(X[0:N, :], X[ibest, :], epsilth)
            PHIbeta = v.ravel().dot(beta.ravel())

            lm1 = max(PHIbeta + sepvalue, 0.0)
            l0 = max(0, PHIbeta - sepvalue, -PHIbeta - sepvalue)
            l1 = max(sepvalue - PHIbeta, 0.0)
            c0 = 1.0
            cm1 = 1.0
            c1 = 1.0
            em1 = exp(-cm1 * lm1)
            f = -em1 / (em1 + exp(-c0 * l0) + exp(-c1 * l1))

        return f

    def results_display(N, z, display, nvar, scalevars, dd, d0, epsil, RBFcalibrate):
        # Display intermediate results
        if display > 0:

            string = ("N = %4d: x = [" % N)
            for j in range(nvar):
                aux = z[j]
                if scalevars:
                    aux = aux * dd[j] + d0[j]

                string = string + ('%7.4f' % aux)
                if j < nvar - 1:
                    string = string + ", "

            string = string + "], "

            if RBFcalibrate:
                string = string + "\u03B5(rbf) = " + ('%5.4f' % epsil)

            print(string)

        return

    def rbf_calibrate(X, I, Ieq, M, N, ibest, sepvalue, rbf, epsil, comparetol, display, thetas, MM, iM, itheta):
        # calibrate scaling of epsil parameter in RBF by cross-validation

        if display > 0:
            print("Recalibrating RBF: ", end='')

        # MM is a numel(thetas)-by-maxevals-by-maxevals array
        # used to save previously computed RBF values
        nth = thetas.size
        imax = 0
        successmax = -1

        for k in range(nth):

            epsilth = epsil * thetas[k]

            # Update matrix containing RBF values for all thetas
            if (k == itheta):
                MM[itheta, 0:N][:, 0:N] = M.copy()[0:N, 0:N]  # values already computed for current theta
            else:
                for j in range(iM + 1, N):
                    for h in range(N):
                        MM[k, j, h] = rbf(X[j, :], X[h, :], epsilth)
                        MM[k, h, j] = MM[k, j, h]  # symmetric; no need to copy the whole array

            Ncomparisons = 0
            success = 0

            for i in range(N):
                if not (i == ibest):
                    Xi = vstack((X[0:i, :], X[i + 1:N, :]))
                    if ibest > i:
                        newibest = ibest - 1
                    else:
                        newibest = ibest

                    Ii = empty((0, 2)).astype('int')
                    ni = 0
                    for j in range(I.shape[0]):
                        if not (I[j, 0] == i) and not (I[j, 1] == i):
                            Ii = vstack((Ii, I[j, :]))
                            if I[j, 0] > i:
                                Ii[ni, 0] = Ii[ni, 0] - 1
                            if I[j, 1] > i:
                                Ii[ni, 1] = Ii[ni, 1] - 1
                            ni = ni + 1

                    Ieqi = empty((0, 2)).astype('int')
                    ni = 0
                    for j in range(Ieq.shape[0]):
                        if not (Ieq[j, 0] == i) and not (Ieq[j, 1] == i):
                            Ieqi = vstack((Ieqi, Ieq[j, :]))
                            if Ieq[j, 0] > i:
                                Ieqi[ni, 0] = Ieqi[ni, 0] - 1
                            if Ieq[j, 1] > i:
                                Ieqi[ni, 1] = Ieqi[ni, 1] - 1
                            ni = ni + 1

                    ind = list(range(0, i)) + list(range(i + 1, N))
                    Mi = MM[k, ind][:, ind]  # no need to copy, as Mi is not changed

                    Wi = get_weights(Xi, Ii, Ieqi, Mi, N - 1, newibest, sepvalue)
                    # Compute RBF @X[i,:]
                    FH = zeros((N, 1))
                    FH[ind] = dot(Mi, Wi)  # rbf at samples

                    v = rbf(Xi[0:N - 1, :], X[i, :], epsilth)
                    FH[i] = v.ravel().dot(Wi.ravel())

                    # Cross validation
                    for j in range(I.shape[0]):
                        if (I[j, 0] == i) or (I[j, 1] == i):
                            Ncomparisons = Ncomparisons + 1
                            i1 = I[j, 0]
                            i2 = I[j, 1]
                            if FH[i1] <= FH[i2] - comparetol:
                                success = success + 1

                    for j in range(Ieq.shape[0]):
                        if (Ieq[j, 0] == i) or (Ieq[j, 1] == i):
                            Ncomparisons = Ncomparisons + 1
                            i1 = Ieq[j, 0]
                            i2 = Ieq[j, 1]
                            if FH[i1] <= FH[i2] - comparetol:
                                success = success + 1
            if (display > 0):
                print(".", end='')

            success = success / Ncomparisons * 100.0
            # NOTE: normalization is only for visualization purposes

            # Find theta such that success is max, and closest to 1 among maximizers
            if (success > successmax) or (success == successmax and
                                          (thetas[k] - 1) ** 2 < (thetas[imax] - 1) ** 2):
                imax = k
                successmax = success

        itheta = imax
        theta = thetas[itheta]
        iM = N - 1

        # Update matrix M
        M[0:N, 0:N] = MM.copy()[itheta, 0:N][:, 0:N]

        print(" done.")

        return theta, itheta, M, MM, iM

    #############################
    # Start of main function

    prob["useRBF"] = True
    prob["alpha"] = 0.0  # dummy
    epsil = prob["epsil"]
    prob["f"] = lambda x: 0.0  # dummy
    rbf = prob["rbf"]
    prob["rbf"] = lambda x1, x2: rbf(x1, x2, epsil)  # used to create matrix M

    (_, lb, ub, nvar, Aineq, bineq, g, isLinConstrained, isNLConstrained,
     X, _, z, nsamp, maxevals, epsDeltaF, alpha, delta, rhoC, display, _,
     dd, d0, _, _, M, scalevars, globoptsol, DIRECTopt,
     PSOiters, PSOswarmsize) = glis_init.init(prob)

    prob["rbf"] = rbf
    isUnknownFeasibilityConstrained = prob["isUnknownFeasibilityConstrained"]
    isUnknownSatisfactionConstrained = prob["isUnknownSatisfactionConstrained"]
    delta_E = delta
    delta_G_default = delta
    delta_S_default = delta/2

    if scalevars:
        # Rescale problem variables in [-1,1]
        pref = lambda x, y: prob["pref"](x * dd + d0, y * dd + d0)
    else:
        pref = prob["pref"]

    time_iter = []
    time_f_eval = []
    time_opt_acquisition = []
    time_fit_surrogate = []

    sepvalue = prob["sepvalue"]
    RBFcalibrate = prob["RBFcalibrate"]
    if RBFcalibrate:
        found = False
        thetas = prob["thetas"]
        for i in range(0, thetas.size):
            if abs(thetas[i] - 1.0) <= 1.0e-14:
                found = True
                thetas[i] = 1.0
                itheta = i
                break

        if not (found):
            raise NameError('At least one element in thetas must be equal to 1')

        MM = zeros((thetas.size, maxevals, maxevals))
        iM = -1  # index denoting the portion of MM already computed

    RBFcalibrationSteps = prob["RBFcalibrationSteps"]
    if RBFcalibrationSteps.size == 0:
        RBFcalibrationSteps = array([nsamp, nsamp + round((maxevals - nsamp) / 4),
                                     nsamp + round((maxevals - nsamp) / 2),
                                     nsamp + round(3 * (maxevals - nsamp) / 4)])
    comparetol = prob["comparetol"]
    acquisition_method = prob["acquisition_method"]

    # Fills in initial preference vectors and find best initial guess
    zbest = X[0,].flatten("c")
    ibest = 0
    I = []  # I[i,1:2]=[h k] if F(h)<F(k)-comparetol
    Ieq = []  # Ieq[i,1:2]=[h k] if |F(h)-F(k)|<=comparetol
    Feasibility_unkn = [] # feasibility labels
    SatConst_unkn = []  # satisfaction constraint labels
    isfeas_seq = ones((maxevals, 1)).astype(int)  # tracks feasibility of each sample w.r.t. both known and unknown constraints
    ibestseq = ones((maxevals, 1)).astype(int)  # tracks the index of the best sample throughout the iterations

    # Initial sampling phase
    for i in range(1, nsamp):
        if i == 1:
            time_fun_eval_start = time.perf_counter()
            if isUnknownSatisfactionConstrained and isUnknownFeasibilityConstrained:  # both unknown feasibility and satisfaction constraints
                (prefi, fesi, fesbest, satconsti, satconstbest) = pref(X[i,].flatten("c"), zbest)
                SatConst_unkn.append([satconstbest])
                Feasibility_unkn.append([fesbest])
            elif not isUnknownSatisfactionConstrained and isUnknownFeasibilityConstrained:  # only unknown feasibility constraints
                (prefi, fesi, fesbest) = pref(X[i,].flatten("c"), zbest)
                Feasibility_unkn.append([fesbest])
            elif isUnknownSatisfactionConstrained and not isUnknownFeasibilityConstrained:  # only unknown satisfaction constraints
                (prefi, satconsti, satconstbest) = pref(X[i,].flatten("c"), zbest)
                SatConst_unkn.append([satconstbest])
            else:  # no unknown constraints
                prefi = pref(X[i,].flatten("c"), zbest)
            time_fun_eval_i = time.perf_counter() - time_fun_eval_start
            time_iter.append(time_fun_eval_i)
            time_f_eval.append(time_fun_eval_i)
            time_opt_acquisition.append(0.0)
            time_fit_surrogate.append(0.0)
        else:
            time_fun_eval_start = time.perf_counter()
            if isUnknownSatisfactionConstrained and isUnknownFeasibilityConstrained:
                (prefi, fesi, _, satconsti, _) = pref(X[i,].flatten("c"), zbest)
            elif not isUnknownSatisfactionConstrained and isUnknownFeasibilityConstrained:  # only unknown feasibility constraints
                (prefi, fesi, _) = pref(X[i,].flatten("c"), zbest)
            elif isUnknownSatisfactionConstrained and not isUnknownFeasibilityConstrained:  # only unknown satisfaction constraints
                (prefi, satconsti, _) = pref(X[i,].flatten("c"), zbest)
            else:  # no unknown constraints
                prefi = pref(X[i,].flatten("c"), zbest)
            time_fun_eval_i = time.perf_counter() - time_fun_eval_start
            time_iter.append(time_fun_eval_i)
            time_f_eval.append(time_fun_eval_i)
            time_opt_acquisition.append(0.0)
            time_fit_surrogate.append(0.0)

        if isUnknownFeasibilityConstrained:
            Feasibility_unkn.append([fesi])
        if isUnknownSatisfactionConstrained:
            SatConst_unkn.append([satconsti])

        isfeas = True
        # known constraints (note: no preference query is required for known constraints)
        if isLinConstrained:
            isfeas = isfeas and all(Aineq.dot(X[i,].T) <= bineq.flatten("c"))
        if isNLConstrained:
            isfeas = isfeas and all(g(X[i,]) <= 0)
        if isUnknownFeasibilityConstrained:
            isfeas = isfeas and fesi > 0
        if isUnknownSatisfactionConstrained:
            isfeas = isfeas and satconsti > 0

        if prefi == -1:
            I.append([i, ibest])
            zbest = X[i,].flatten("c")
            ibest = i
        elif prefi == 1:
            I.append([ibest, i])
        else:
            Ieq.append([i, ibest])
        ibestseq[i] = ibest
        isfeas_seq[i] = isfeas

    I = array(I).astype(int)
    Ieq = array(Ieq).astype(int)

    Feasibility_unkn = array(Feasibility_unkn).astype(int)
    SatConst_unkn = array(SatConst_unkn).astype(int)

    if I.size == 0:
        I = empty((0, 2), int)
    if Ieq.size == 0:
        Ieq = empty((0, 2), int)

    N = nsamp

    if RBFcalibrate:
        theta, itheta, M, MM, iM = rbf_calibrate(X, I, Ieq, M, N, ibest, sepvalue, rbf, epsil, comparetol, display,
                                                 thetas, MM, iM, itheta)
    else:
        theta = 1.0

    W = get_weights(X, I, Ieq, M, N, ibest, sepvalue)

    if isUnknownFeasibilityConstrained:
        delta_G = get_delta_adpt(X, Feasibility_unkn, delta_G_default)
    else:
        delta_G = 0
    if isUnknownSatisfactionConstrained:
        delta_S = get_delta_adpt(X, SatConst_unkn, delta_S_default)
    else:
        delta_S = 0

    results_display(N, zbest, display, nvar, scalevars, dd, d0, theta * epsil, RBFcalibrate)

    # Active learning phase
    while N < maxevals:

        time_iter_start = time.perf_counter()

        # Compute range of current surrogate function
        FH = dot(M[0:N, 0:N], W)  # surrogate at current samples
        dF = max(max(FH) - min(FH), epsDeltaF)

        if isLinConstrained or isNLConstrained:
            # penalty = rhoC * dF # for GLIS
            penalty = rhoC  # for GLISp
        if isLinConstrained and isNLConstrained:
            constrpenalty = lambda x: (penalty * (sum(maximum((Aineq.dot(x) - bineq).flatten("c"), 0.0) ** 2)
                                                  + sum(maximum(g(x), 0) ** 2)))
        elif isLinConstrained and not isNLConstrained:
            constrpenalty = lambda x: penalty * (sum(maximum((Aineq.dot(x) - bineq).flatten("c"), 0.0) ** 2))
        elif not isLinConstrained and isNLConstrained:
            constrpenalty = lambda x: penalty * sum(maximum(g(x), 0.0) ** 2)
        else:
            constrpenalty = lambda x: 0.0

        # Compute the IDW function of current best x (used in the acquisition term)
        d_ibest = vecsum((vstack((X[0:ibest, :], X[ibest + 1:, :])) - X[ibest, :]) ** 2, axis=-1)
        ii = where(d_ibest < 1e-12)
        if ii[0].size > 0:
            iw_ibest = 0
        else:
            iw_ibest = 1.0 / sum(1.0 / d_ibest)


        acquisition = lambda x: (facquisition_pref(x, X, N, delta_E, dF, W, rbf, epsil,
                                                   theta, sepvalue, ibest, acquisition_method,
                                                   isUnknownFeasibilityConstrained, isUnknownSatisfactionConstrained,
                                                   Feasibility_unkn, SatConst_unkn, delta_G, delta_S,
                                                   iw_ibest, maxevals) + constrpenalty(x))

        time_opt_acq_start = time.perf_counter()

        if globoptsol == "pswarm":
            # pso(func, lb, ub, ieqcons=[], f_ieqcons=None, args=(), kwargs={},
            #    swarmsize=100, omega=0.5, phip=0.5, phig=0.5, maxiter=100, minstep=1e-8,
            #    minfunc=1e-8, debug=False)
            with contextlib.redirect_stdout(io.StringIO()):
                z, cost = pso(acquisition, lb, ub, swarmsize=PSOswarmsize,
                              minfunc=dF * 1.0e-8, maxiter=PSOiters)

        elif globoptsol == "direct":
            DIRECTopt.set_min_objective(lambda x, grad: acquisition(x)[0])
            z = DIRECTopt.optimize(z.flatten("c"))

        time_opt_acquisition.append(time.perf_counter() - time_opt_acq_start)

        N = N + 1

        X[N - 1,] = z.T

        time_fit_surrogate_start = time.perf_counter()

        # Just update last row and column of M
        epsilth = epsil * theta
        for h in range(N):
            mij = rbf(X[h,], X[N - 1,], epsilth)
            M[h, N - 1] = mij
            M[N - 1, h] = mij
        time_fit_surrogate.append(time.perf_counter() - time_fit_surrogate_start)

        #
        # Assessment of comparison
        #

        time_fun_eval_start = time.perf_counter()  # time the preference query as the "function evaluation"
        if isUnknownSatisfactionConstrained and isUnknownFeasibilityConstrained:
            (prefN, fesN, _, satconstN, _) = pref(z, zbest)  # preference query
        elif not isUnknownSatisfactionConstrained and isUnknownFeasibilityConstrained:
            (prefN, fesN, _) = pref(z, zbest)
        elif isUnknownSatisfactionConstrained and not isUnknownFeasibilityConstrained:
            (prefN, satconstN, _) = pref(z, zbest)
        else:
            prefN = pref(z, zbest)
        time_f_eval.append(time.perf_counter() - time_fun_eval_start)

        if isUnknownFeasibilityConstrained:
            Feasibility_unkn = vstack((Feasibility_unkn,fesN))
            delta_G = get_delta_adpt(X, Feasibility_unkn, delta_G_default)
        else:
            delta_G = 0

        if isUnknownSatisfactionConstrained:
            SatConst_unkn = vstack((SatConst_unkn, satconstN))
            delta_S = get_delta_adpt(X, SatConst_unkn, delta_S_default)
        else:
            delta_S = 0

        isfeas = True
        if isLinConstrained:
            isfeas = isfeas and all(Aineq.dot(z) <= bineq.flatten("c"))
        if isNLConstrained:
            isfeas = isfeas and all(g(z) <= 0)
        if isUnknownFeasibilityConstrained:
            isfeas = isfeas and Feasibility_unkn[N-1] > 0
        if isUnknownSatisfactionConstrained:
            isfeas = isfeas and SatConst_unkn[N-1] > 0

        if prefN == -1:
            I = vstack((I, [N - 1, ibest]))
            zbest = z.copy()
            ibest = N - 1
        elif prefN == 1:
            I = vstack((I, [ibest, N - 1]))
        else:
            Ieq = vstack((Ieq, [ibest, N - 1]))

        ibestseq[N - 1] = ibest
        isfeas_seq[N - 1] = isfeas

        if RBFcalibrate and isin(N, RBFcalibrationSteps):
            theta, itheta, M, MM, iM = rbf_calibrate(X, I, Ieq, M, N, ibest, sepvalue, rbf, epsil, comparetol, display,
                                                     thetas, MM, iM, itheta)

        W = get_weights(X, I, Ieq, M, N, ibest, sepvalue)  # sepvalue, as in the initial fit

        results_display(N, z, display, nvar, scalevars, dd, d0, epsil * theta, RBFcalibrate)

        time_iter.append(time.perf_counter() - time_iter_start)
    # end while

    xopt = zbest.copy()
    if not isUnknownFeasibilityConstrained:
        Feasibility_unkn = ones((maxevals, 1))

    if not isUnknownSatisfactionConstrained and not isUnknownFeasibilityConstrained:
        SatConst_unkn = ones((maxevals, 1))
    elif not isUnknownSatisfactionConstrained and isUnknownFeasibilityConstrained:
        SatConst_unkn = Feasibility_unkn

    fes_opt_unkn = Feasibility_unkn[ibest]
    satConst_opt_unkn = SatConst_unkn[ibest]
    feas_opt_comb = isfeas_seq[ibest]

    if scalevars:
        # Scale variables back
        xopt = xopt * dd + d0
        X = X * (ones((N, 1)) * dd) + ones((N, 1)) * d0

    out = {"xopt": xopt,
           "X": X,
           "W": W,
           "M": M,
           "I": I,
           "Ieq": Ieq,
           "theta": theta,
           "Feasibility_unkn": Feasibility_unkn,
           "fes_opt_unkn": fes_opt_unkn,
           "SatConst_unkn": SatConst_unkn,
           "satConst_opt_unkn": satConst_opt_unkn,
           "isfeas_seq": isfeas_seq,
           "feas_opt_comb": feas_opt_comb,
           "ibest": ibest,
           "ibestseq": ibestseq,
           "time_iter": array(time_iter),
           "time_opt_acquisition": array(time_opt_acquisition),
           "time_fit_surrogate": array(time_fit_surrogate),
           "time_f_eval": array(time_f_eval)
           }

    return out
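
A minimal end-to-end sketch of driving solve. The glis.glisp module path and the default() helper follow the docstring above, make_pref is the helper sketched after Code Example #4, and all numbers are illustrative:

import glis.glisp as glisp
from numpy import logspace

nvars = 2
prob = glisp.default(nvars)  # default problem structure
prob["pref"] = make_pref(lambda x: float((x ** 2).sum()), 1e-4)
prob["maxevals"] = 30
prob["sepvalue"] = 1.0 / prob["maxevals"]
prob["RBFcalibrate"] = True
prob["thetas"] = logspace(-1, 1, 10, False)  # must contain the value 1
prob["acquisition_method"] = 1               # 1 = scaled surrogate - delta * IDW
prob["isUnknownFeasibilityConstrained"] = False
prob["isUnknownSatisfactionConstrained"] = False

out = glisp.solve(prob)
print(out["xopt"], out["ibest"])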
Code Example #7
    problem["nsamp"] = nsamp
    problem["svdtol"] = 1e-6
    # problem["globoptsol"] = "direct"
    problem["globoptsol"] = "pswarm"
    problem["display"] = 1

    problem["scalevars"] = 1
    problem["compare_tol"] = 1e-6

    problem["constraint_penalty"] = 1e5
    problem["feasible_sampling"] = False

    if runGLISp:
        problem["rbf"] = lambda x1, x2, epsil: 1 / (
                    1 + epsil ** 2 * vecsum((x1 - x2) ** 2, axis=-1))  # inverse quadratic
        # problem["rbf"] = lambda x1,x2,epsil: exp(-(epsil**2*vecsum((x1-x2)**2,axis=-1)) # Gaussian RBF
        # problem["rbf"] = lambda x1,x2,epsil: sqrt((1+epsil**2*vecsum((x1-x2)**2,axis=-1)) # multiquadric
        print("Running GLISp optimization:\n")
    if runGLIS:
        problem["useRBF"] = 1
        problem["alpha"] = delta/5
        problem["f"] = fun
        if problem["useRBF"]:
            epsil = .5
            def fun_rbf(x1, x2):
                return 1 / (1 + epsil ** 2 * vecsum((x1 - x2) ** 2, axis=-1))
            problem["rbf"] = fun_rbf
        print("Running GLIS optimization:\n")

    viridis = plt.cm.get_cmap('viridis', Ntests)